{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T17:48:28Z","timestamp":1772905708080,"version":"3.50.1"},"publisher-location":"Cham","reference-count":74,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031200700","type":"print"},{"value":"9783031200717","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20071-7_3","type":"book-chapter","created":{"date-parts":[[2022,11,12]],"date-time":"2022-11-12T05:15:09Z","timestamp":1668230109000},"page":"34-52","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":28,"title":["Spike Transformer: Monocular Depth Estimation for\u00a0Spiking Camera"],"prefix":"10.1007","author":[{"given":"Jiyuan","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Lulu","family":"Tang","sequence":"additional","affiliation":[]},{"given":"Zhaofei","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Jiwen","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Tiejun","family":"Huang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,13]]},"reference":[{"key":"3_CR1","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lu\u010di\u0107, M., Schmid, C.: ViViT: a video vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 6836\u20136846 (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"3_CR2","unstructured":"Bao, H., Dong, L., Wei, F.: BEiT: BERT pre-training of image transformers. arXiv preprint arXiv:2106.08254 (2021)"},{"key":"3_CR3","unstructured":"Baudron, A., Wang, Z.W., Cossairt, O., Katsaggelos, A.K.: E3D: event-based 3D shape reconstruction. arXiv preprint arXiv:2012.05214 (2020)"},{"key":"3_CR4","unstructured":"Bertasius, G., Wang, H., Torresani, L.: Is space-time attention all you need for video understanding. arXiv preprint arXiv:2102.05095 (2021)"},{"key":"3_CR5","unstructured":"Bhat, S.F., Alhashim, I., Wonka, P.: AdaBins: depth estimation using adaptive bins. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4009\u20134018 (2021)"},{"key":"3_CR6","unstructured":"Brown, T., et al.: Language models are few-shot learners. Adv. Neural Inf. Process. Syst. (NeurIPS) 33, 1877\u20131901 (2020)"},{"key":"3_CR7","doi-asserted-by":"crossref","unstructured":"Chaney, K., Zhu, A.Z., Daniilidis, K.: Learning event-based height from plane and parallax. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPR) (2019)","DOI":"10.1109\/CVPRW.2019.00206"},{"key":"3_CR8","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. 
arXiv preprint arXiv:1810.04805 (2018)"},{"key":"3_CR9","doi-asserted-by":"crossref","unstructured":"Dong, S., Huang, T., Tian, Y.: Spike camera and its coding methods. In: 2017 Data Compression Conference (DCC), pp. 437\u2013437 (2017)","DOI":"10.1109\/DCC.2017.69"},{"key":"3_CR10","doi-asserted-by":"crossref","unstructured":"Dong, S., Zhu, L., Xu, D., Tian, Y., Huang, T.: An efficient coding method for spike camera using inter-spike intervals. In: 2019 Data Compression Conference (DCC), pp. 568\u2013568. IEEE (2019)","DOI":"10.1109\/DCC.2019.00080"},{"key":"3_CR11","unstructured":"Dosovitskiy, A., et al.: An image is worth 16\u00a0$$\\times $$\u00a016 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"3_CR12","unstructured":"Dosovitskiy, A., Ros, G., Codevilla, F., Lopez, A., Koltun, V.: CARLA: an open urban driving simulator. In: Conference on Robot Learning, pp. 1\u201316 (2017)"},{"key":"3_CR13","doi-asserted-by":"crossref","unstructured":"Eigen, D., Fergus, R.: Predicting depth, surface normals and semantic labels with a common multi-scale convolutional architecture. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (2015)","DOI":"10.1109\/ICCV.2015.304"},{"key":"3_CR14","doi-asserted-by":"crossref","unstructured":"Fu, H., Gong, M., Wang, C., Batmanghelich, K., Tao, D.: Deep ordinal regression network for monocular depth estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2002\u20132011 (2018)","DOI":"10.1109\/CVPR.2018.00214"},{"key":"3_CR15","doi-asserted-by":"crossref","unstructured":"Gallego, G., et al.: Event-based vision: a survey. IEEE Trans. Patt. Anal. Mach. Intell. 44(1), 154\u2013180 (2020)","DOI":"10.1109\/TPAMI.2020.3008413"},{"key":"3_CR16","doi-asserted-by":"crossref","unstructured":"Gallego, G., Gehrig, M., Scaramuzza, D.: Focus is all you need: loss functions for event-based vision. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 12280\u201312289 (2019)","DOI":"10.1109\/CVPR.2019.01256"},{"key":"3_CR17","doi-asserted-by":"crossref","unstructured":"Gallego, G., Rebecq, H., Scaramuzza, D.: A unifying contrast maximization framework for event cameras, with applications to motion, depth, and optical flow estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3867\u20133876 (2018)","DOI":"10.1109\/CVPR.2018.00407"},{"key":"3_CR18","doi-asserted-by":"crossref","unstructured":"Gehrig, D., Gehrig, M., Hidalgo-Carri\u00f3, J., Scaramuzza, D.: Video to events: recycling video datasets for event cameras. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3586\u20133595 (2020)","DOI":"10.1109\/CVPR42600.2020.00364"},{"issue":"2","key":"3_CR19","doi-asserted-by":"publisher","first-page":"2822","DOI":"10.1109\/LRA.2021.3060707","volume":"6","author":"D Gehrig","year":"2021","unstructured":"Gehrig, D., R\u00fcegg, M., Gehrig, M., Hidalgo-Carri\u00f3, J., Scaramuzza, D.: Combining events and frames using recurrent asynchronous multimodal networks for monocular depth prediction. IEEE Robot. Autom. Lett. 6(2), 2822\u20132829 (2021)","journal-title":"IEEE Robot. Autom. Lett."},{"key":"3_CR20","doi-asserted-by":"crossref","unstructured":"Girdhar, R., Carreira, J., Doersch, C., Zisserman, A.: Video action transformer network. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/CVPR.2019.00033"},{"key":"3_CR21","doi-asserted-by":"crossref","unstructured":"Godard, C., Mac Aodha, O., Brostow, G.J.: Unsupervised monocular depth estimation with left-right consistency. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 270\u2013279 (2017)","DOI":"10.1109\/CVPR.2017.699"},{"key":"3_CR22","doi-asserted-by":"crossref","unstructured":"Godard, C., Mac Aodha, O., Brostow, G.J.: Unsupervised monocular depth estimation with left-right consistency. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2017)","DOI":"10.1109\/CVPR.2017.699"},{"key":"3_CR23","doi-asserted-by":"crossref","unstructured":"Godard, C., Mac Aodha, O., Firman, M., Brostow, G.J.: Digging into self-supervised monocular depth estimation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 3828\u20133838 (2019)","DOI":"10.1109\/ICCV.2019.00393"},{"issue":"2","key":"3_CR24","doi-asserted-by":"publisher","first-page":"187","DOI":"10.1007\/s41095-021-0229-5","volume":"7","author":"MH Guo","year":"2021","unstructured":"Guo, M.H., Cai, J.X., Liu, Z.N., Mu, T.J., Martin, R.R., Hu, S.M.: PCT: point cloud transformer. Comput. Vis. Media 7(2), 187\u2013199 (2021). https:\/\/doi.org\/10.1007\/s41095-021-0229-5","journal-title":"Comput. Vis. Media"},{"issue":"1","key":"3_CR25","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1038\/s41598-018-37186-2","volume":"9","author":"G Haessig","year":"2019","unstructured":"Haessig, G., Berthelon, X., Ieng, S.H., Benosman, R.: A spiking neural network model of depth from defocus for event-based neuromorphic vision. Scient. Rep. 9(1), 1\u201311 (2019)","journal-title":"Scient. Rep."},{"key":"3_CR26","doi-asserted-by":"crossref","unstructured":"Hidalgo-Carri\u00f3, J., Gehrig, D., Scaramuzza, D.: Learning monocular dense depth from events. In: 2020 International Conference on 3D Vision (3DV), pp. 534\u2013542. IEEE (2020)","DOI":"10.1109\/3DV50981.2020.00063"},{"key":"3_CR27","doi-asserted-by":"crossref","unstructured":"Hu, L., Zhao, R., Ding, Z., Xiong, R., Ma, L., Huang, T.: SCFlow: optical flow estimation for spiking camera. arXiv preprint arXiv:2110.03916 (2021)","DOI":"10.1109\/CVPR52688.2022.01732"},{"key":"3_CR28","doi-asserted-by":"crossref","unstructured":"Huang, T., et al.: 1000x faster camera and machine vision with ordinary devices. Engineering (2022)","DOI":"10.1016\/j.eng.2022.01.012"},{"key":"3_CR29","doi-asserted-by":"crossref","unstructured":"Johnston, A., Carneiro, G.: Self-supervised monocular trained depth estimation using self-attention and discrete disparity volume. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4756\u20134765 (2020)","DOI":"10.1109\/CVPR42600.2020.00481"},{"key":"3_CR30","doi-asserted-by":"publisher","first-page":"64","DOI":"10.1162\/tacl_a_00300","volume":"8","author":"M Joshi","year":"2020","unstructured":"Joshi, M., Chen, D., Liu, Y., Weld, D.S., Zettlemoyer, L., Levy, O.: SpanBERT: improving pre-training by representing and predicting spans. Trans. Assoc. Comput. Linguist. 8, 64\u201377 (2020)","journal-title":"Trans. Assoc. Comput. 
Linguist."},{"key":"3_CR31","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"349","DOI":"10.1007\/978-3-319-46466-4_21","volume-title":"Computer Vision \u2013 ECCV 2016","author":"H Kim","year":"2016","unstructured":"Kim, H., Leutenegger, S., Davison, A.J.: Real-time 3D reconstruction and 6-DoF tracking with an event camera. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9910, pp. 349\u2013364. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46466-4_21"},{"key":"3_CR32","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"3_CR33","doi-asserted-by":"crossref","unstructured":"Kopf, J., Rong, X., Huang, J.B.: Robust consistent video depth estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1611\u20131621 (2021)","DOI":"10.1109\/CVPR46437.2021.00166"},{"key":"3_CR34","doi-asserted-by":"crossref","unstructured":"Lee, J.H., Kim, C.S.: Monocular depth estimation using relative depth maps. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2019)","DOI":"10.1109\/CVPR.2019.00996"},{"key":"3_CR35","doi-asserted-by":"crossref","unstructured":"Lee, Y., Kim, J., Willette, J., Hwang, S.J.: MPVit: multi-path vision transformer for dense prediction. arXiv preprint arXiv:2112.11010 (2021)","DOI":"10.1109\/CVPR52688.2022.00714"},{"key":"3_CR36","doi-asserted-by":"crossref","unstructured":"Li, Z., Snavely, N.: MegaDepth: learning single-view depth prediction from internet photos. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2018)","DOI":"10.1109\/CVPR.2018.00218"},{"key":"3_CR37","doi-asserted-by":"crossref","unstructured":"Liang, J., Cao, J., Sun, G., Zhang, K., Van Gool, L., Timofte, R.: SwinIR: image restoration using swin transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 1833\u20131844 (2021)","DOI":"10.1109\/ICCVW54120.2021.00210"},{"issue":"2","key":"3_CR38","doi-asserted-by":"publisher","first-page":"566","DOI":"10.1109\/JSSC.2007.914337","volume":"43","author":"P Lichtsteiner","year":"2008","unstructured":"Lichtsteiner, P., Posch, C., Delbruck, T.: A 128$$\\times $$128 120db 15$$\\mu $$s latency asynchronous temporal contrast vision sensor. IEEE J. Solid-state Circ. 43(2), 566\u2013576 (2008)","journal-title":"IEEE J. Solid-state Circ."},{"key":"3_CR39","doi-asserted-by":"crossref","unstructured":"Liu, F., Shen, C., Lin, G.: Deep convolutional neural fields for depth estimation from a single image. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2015)","DOI":"10.1109\/CVPR.2015.7299152"},{"key":"3_CR40","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3_CR41","unstructured":"Liu, Z., et al.: Video swin transformer. arXiv preprint arXiv:2106.13230 (2021)"},{"key":"3_CR42","unstructured":"Liu, Z., et al.: ConvTransformer: a convolutional transformer network for video frame synthesis. 
arXiv preprint arXiv:2011.10185 (2020)"},{"issue":"2","key":"3_CR43","doi-asserted-by":"publisher","first-page":"266","DOI":"10.1016\/j.neuron.2012.10.002","volume":"76","author":"RH Masland","year":"2012","unstructured":"Masland, R.H.: The neuronal organization of the retina. Neuron 76(2), 266\u2013280 (2012)","journal-title":"Neuron"},{"key":"3_CR44","doi-asserted-by":"crossref","unstructured":"Miangoleh, S.M.H., Dille, S., Mai, L., Paris, S., Aksoy, Y.: Boosting monocular depth estimation models to high-resolution via content-adaptive multi-resolution merging. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition(CVPR), pp. 9685\u20139694 (2021)","DOI":"10.1109\/CVPR46437.2021.00956"},{"key":"3_CR45","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I., et al.: Language models are unsupervised multitask learners. OpenAI blog (2019)"},{"key":"3_CR46","doi-asserted-by":"crossref","unstructured":"Ranftl, R., Bochkovskiy, A., Koltun, V.: Vision transformers for dense prediction. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 12179\u201312188 (2021)","DOI":"10.1109\/ICCV48922.2021.01196"},{"issue":"12","key":"3_CR47","doi-asserted-by":"publisher","first-page":"1394","DOI":"10.1007\/s11263-017-1050-6","volume":"126","author":"H Rebecq","year":"2018","unstructured":"Rebecq, H., Gallego, G., Mueggler, E., Scaramuzza, D.: EMVS: event-based multi-view stereo-3D reconstruction with an event camera in real-time. Int. J. Comput. Vis. 126(12), 1394\u20131414 (2018)","journal-title":"Int. J. Comput. Vis."},{"key":"3_CR48","doi-asserted-by":"crossref","unstructured":"Rebecq, H., Gallego, G., Scaramuzza, D.: EMVS: event-based multi-view stereo. In: British Machine Vision Conference (BMVC) (2016)","DOI":"10.5244\/C.30.63"},{"key":"3_CR49","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1007\/978-3-319-24574-4_28","volume-title":"Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2015","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-Net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28"},{"issue":"5","key":"3_CR50","doi-asserted-by":"publisher","first-page":"824","DOI":"10.1109\/TPAMI.2008.132","volume":"31","author":"A Saxena","year":"2008","unstructured":"Saxena, A., Sun, M., Ng, A.Y.: Make3d: learning 3d scene structure from a single still image. IEEE Trans. Patt. Anal. Mach. Intell. 31(5), 824\u2013840 (2008)","journal-title":"IEEE Trans. Patt. Anal. Mach. Intell."},{"key":"3_CR51","first-page":"802","volume":"28","author":"X Shi","year":"2015","unstructured":"Shi, X., Chen, Z., Wang, H., Yeung, D.Y., Wong, W.K., Woo, W.C.: Convolutional LSTM network: a machine learning approach for precipitation nowcasting. Adv. Neural Inf. Process. Syst. 28, 802\u2013810 (2015)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"3_CR52","doi-asserted-by":"crossref","unstructured":"Sim, H., Oh, J., Kim, M.: XVFi: extreme video frame interpolation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 
14489\u201314498 (2021)","DOI":"10.1109\/ICCV48922.2021.01422"},{"key":"3_CR53","doi-asserted-by":"crossref","unstructured":"Son, B., et al.: A 640$$\\times $$ 480 dynamic vision sensor with a 9$$\\mu $$m pixel and 300meps address-event representation. In: IEEE International Solid-State Circuits Conference (ISSCC), pp. 66\u201367 (2017)","DOI":"10.1109\/ISSCC.2017.7870263"},{"key":"3_CR54","doi-asserted-by":"crossref","unstructured":"Varma, A., Chawla, H., Zonooz, B., Arani, E.: Transformers in self-supervised monocular depth estimation with unknown camera intrinsics. arXiv preprint arXiv:2202.03131 (2022)","DOI":"10.5220\/0010884000003124"},{"key":"3_CR55","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems (NeurIPS) (2017)"},{"key":"3_CR56","doi-asserted-by":"crossref","unstructured":"Wang, C., Buenaposada, J.M., Zhu, R., Lucey, S.: Learning depth from monocular videos using direct methods. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2018)","DOI":"10.1109\/CVPR.2018.00216"},{"key":"3_CR57","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Pyramid vision transformer: a versatile backbone for dense prediction without convolutions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 568\u2013578 (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"3_CR58","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: End-to-end video instance segmentation with transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 8741\u20138750 (2021)","DOI":"10.1109\/CVPR46437.2021.00863"},{"issue":"10","key":"3_CR59","doi-asserted-by":"publisher","first-page":"747","DOI":"10.1038\/nrn1497","volume":"5","author":"H W\u00e4ssle","year":"2004","unstructured":"W\u00e4ssle, H.: Parallel processing in the mammalian retina. Nat. Rev. Neurosci. 5(10), 747\u2013757 (2004)","journal-title":"Nat. Rev. Neurosci."},{"key":"3_CR60","doi-asserted-by":"crossref","unstructured":"Weng, W., Zhang, Y., Xiong, Z.: Event-based video reconstruction using transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 2563\u20132572 (2021)","DOI":"10.1109\/ICCV48922.2021.00256"},{"key":"3_CR61","doi-asserted-by":"crossref","unstructured":"Yang, G., Tang, H., Ding, M., Sebe, N., Ricci, E.: Transformer-based attention networks for continuous pixel-wise prediction. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 16269\u201316279 (2021)","DOI":"10.1109\/ICCV48922.2021.01596"},{"key":"3_CR62","doi-asserted-by":"crossref","unstructured":"You, Z., Tsai, Y.H., Chiu, W.C., Li, G.: Towards interpretable deep networks for monocular depth estimation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 12879\u201312888 (2021)","DOI":"10.1109\/ICCV48922.2021.01264"},{"key":"3_CR63","doi-asserted-by":"crossref","unstructured":"Yu, X., Rao, Y., Wang, Z., Liu, Z., Lu, J., Zhou, J.: PoinTr: diverse point cloud completion with geometry-aware transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 12498\u201312507 (2021)","DOI":"10.1109\/ICCV48922.2021.01227"},{"key":"3_CR64","doi-asserted-by":"crossref","unstructured":"Yu, X., Tang, L., Rao, Y., Huang, T., Zhou, J., Lu, J.: Point-BERT: pre-training 3D point cloud transformers with masked point modeling. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19313\u201319322 (2022)","DOI":"10.1109\/CVPR52688.2022.01871"},{"key":"3_CR65","unstructured":"Yuan, Y., et al.: HRFormer: high-resolution transformer for dense prediction. arXiv preprint arXiv:2110.09408 (2021)"},{"key":"3_CR66","doi-asserted-by":"crossref","unstructured":"Zhao, H., Jiang, L., Jia, J., Torr, P.H., Koltun, V.: Point transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 16259\u201316268 (2021)","DOI":"10.1109\/ICCV48922.2021.01595"},{"key":"3_CR67","doi-asserted-by":"crossref","unstructured":"Zhao, J., Xie, J., Xiong, R., Zhang, J., Yu, Z., Huang, T.: Super resolve dynamic scene from continuous spike streams. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 2533\u20132542 (2021)","DOI":"10.1109\/ICCV48922.2021.00253"},{"key":"3_CR68","doi-asserted-by":"crossref","unstructured":"Zhao, J., Xiong, R., Liu, H., Zhang, J., Huang, T.: Spk2ImgNet: learning to reconstruct dynamic scene from continuous spike stream. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11996\u201312005 (2021)","DOI":"10.1109\/CVPR46437.2021.01182"},{"key":"3_CR69","doi-asserted-by":"crossref","unstructured":"Zheng, Y., Zheng, L., Yu, Z., Shi, B., Tian, Y., Huang, T.: High-speed image reconstruction through short-term plasticity for spiking cameras. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 6358\u20136367 (2021)","DOI":"10.1109\/CVPR46437.2021.00629"},{"key":"3_CR70","doi-asserted-by":"crossref","unstructured":"Zhou, Y., Gallego, G., Rebecq, H., Kneip, L., Li, H., Scaramuzza, D.: Semi-dense 3D reconstruction with a stereo event camera. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 235\u2013251 (2018)","DOI":"10.1007\/978-3-030-01246-5_15"},{"key":"3_CR71","doi-asserted-by":"crossref","unstructured":"Zhu, A.Z., Chen, Y., Daniilidis, K.: Realtime time synchronized event-based stereo. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 433\u2013447 (2018)","DOI":"10.1007\/978-3-030-01231-1_27"},{"key":"3_CR72","doi-asserted-by":"crossref","unstructured":"Zhu, A.Z., Yuan, L., Chaney, K., Daniilidis, K.: Unsupervised event-based learning of optical flow, depth, and egomotion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 989\u2013997 (2019)","DOI":"10.1109\/CVPR.2019.00108"},{"key":"3_CR73","doi-asserted-by":"crossref","unstructured":"Zhu, L., Dong, S., Huang, T., Tian, Y.: A retina-inspired sampling method for visual texture reconstruction. In: IEEE International Conference on Multimedia and Expo (ICME), pp. 1432\u20131437. IEEE (2019)","DOI":"10.1109\/ICME.2019.00248"},{"key":"3_CR74","doi-asserted-by":"crossref","unstructured":"Zhu, L., Dong, S., Li, J., Huang, T., Tian, Y.: Retina-like visual image reconstruction via spiking neural model. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.00151"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20071-7_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,12]],"date-time":"2022-11-12T05:18:08Z","timestamp":1668230288000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20071-7_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031200700","9783031200717"],"references-count":74,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20071-7_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"13 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}