{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T06:11:29Z","timestamp":1771913489834,"version":"3.50.1"},"reference-count":62,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2023,5,23]],"date-time":"2023-05-23T00:00:00Z","timestamp":1684800000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,5,23]],"date-time":"2023-05-23T00:00:00Z","timestamp":1684800000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100011688","name":"Electronic Components and Systems for European Leadership","doi-asserted-by":"publisher","award":["876487"],"award-info":[{"award-number":["876487"]}],"id":[{"id":"10.13039\/501100011688","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Sign Process Syst"],"published-print":{"date-parts":[[2023,6]]},"DOI":"10.1007\/s11265-023-01874-8","type":"journal-article","created":{"date-parts":[[2023,5,23]],"date-time":"2023-05-23T02:01:16Z","timestamp":1684807276000},"page":"703-719","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Real-Time Light Field Video Focusing and GPU Accelerated 
Streaming"],"prefix":"10.1007","volume":"95","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3126-0545","authenticated-orcid":false,"given":"Tom\u00e1\u0161","family":"Chlubna","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0841-4198","authenticated-orcid":false,"given":"Tom\u00e1\u0161","family":"Milet","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7969-5877","authenticated-orcid":false,"given":"Pavel","family":"Zem\u010d\u00edk","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5795-5938","authenticated-orcid":false,"given":"Michal","family":"Kula","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,5,23]]},"reference":[{"key":"1874_CR1","doi-asserted-by":"publisher","unstructured":"Trottnow, J., Spielmann, S., Lange, T., Chelli, K., Solony, M., Smrz, P., Zemcik, P., Aenchbacher, W., Grogan, M., Alain, M., Smolic, A., Canham, T., Vu-Thanh, O., V\u00e1zquez-Corral, J., & Bertalm\u00edo, M. (2019). The potential of light fields in media productions. In: SIGGRAPH Asia 2019 Technical Briefs. SA \u201919, pp. 71\u201374. Association for Computing Machinery, New York, NY, USA. https:\/\/doi.org\/10.1145\/3355088.3365158","DOI":"10.1145\/3355088.3365158"},{"issue":"7","key":"1874_CR2","doi-asserted-by":"publisher","first-page":"319","DOI":"10.1007\/s41095-021-0205-0","volume":"2021","author":"T Chlubna","year":"2021","unstructured":"Chlubna, T., Milet, T., & Zem\u010d\u00edk, P. (2021). Real-time per-pixel focusing method for light field rendering. Computational Visual Media, 2021(7), 319\u2013333. https:\/\/doi.org\/10.1007\/s41095-021-0205-0","journal-title":"Computational Visual Media"},{"key":"1874_CR3","first-page":"3","volume-title":"Computational Models of Visual Processing","author":"EH Adelson","year":"1991","unstructured":"Adelson, E. H., & Bergen, J. R. (1991). 
The plenoptic function and the elements of early vision. In M. S. Landy & A. J. Movshon (Eds.), Computational Models of Visual Processing (pp. 3\u201320). Cambridge, MA: MIT Press."},{"key":"1874_CR4","doi-asserted-by":"publisher","unstructured":"Levoy, M., & Hanrahan, P. (1996) Light field rendering. In: Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques. SIGGRAPH \u201996, pp. 31\u201342. Association for Computing Machinery, New York, NY, USA. https:\/\/doi.org\/10.1145\/237170.237199","DOI":"10.1145\/237170.237199"},{"key":"1874_CR5","doi-asserted-by":"publisher","unstructured":"Gortler, S. J., Grzeszczuk, R., Szeliski, R., Cohen, M. F. (1996). The lumigraph. In: Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques. SIGGRAPH \u201996, pp. 43\u201354. Association for Computing Machinery, New York, NY, USA. https:\/\/doi.org\/10.1145\/237170.237200","DOI":"10.1145\/237170.237200"},{"key":"1874_CR6","doi-asserted-by":"publisher","unstructured":"Isaksen, A., McMillan, L., Gortler, S. J. (2000). Dynamically reparameterized light fields. In: Proceedings of the 27th Annual Conference on Computer Graphics and Interactive Techniques. SIGGRAPH \u201900, pp. 297\u2013306. ACM Press\/Addison-Wesley Publishing Co., USA. https:\/\/doi.org\/10.1145\/344779.344929","DOI":"10.1145\/344779.344929"},{"key":"1874_CR7","doi-asserted-by":"publisher","unstructured":"Schmeing, M., & Jiang, X. (2011). In: Wang, P.S.P. (ed.) Depth Image Based Rendering, pp. 279\u2013310. Springer, Berlin, Heidelberg. https:\/\/doi.org\/10.1007\/978-3-642-22407-2_12","DOI":"10.1007\/978-3-642-22407-2_12"},{"key":"1874_CR8","doi-asserted-by":"publisher","first-page":"5","DOI":"10.1145\/3190859","volume":"37","author":"S Lee","year":"2018","unstructured":"Lee, S., Kim, Y., & Eisemann, E. (2018). Iterative depth warping. ACM Transactions on Graphics, 37, 5. 
https:\/\/doi.org\/10.1145\/3190859","journal-title":"ACM Transactions on Graphics"},{"key":"1874_CR9","unstructured":"Rosenthal, P., & Linsen, L. (2008). Image-space point cloud rendering. In: Proceedings of Computer Graphics International, pp. 136\u2013143."},{"key":"1874_CR10","doi-asserted-by":"crossref","unstructured":"Waschb\u00fcsch, M., W\u00fcrmlin, S., & Gross, M. (2007). 3d video billboard clouds. In: Computer Graphics Forum, 26, 561\u2013569. Wiley Online Library.","DOI":"10.1111\/j.1467-8659.2007.01079.x"},{"issue":"4","key":"1874_CR11","first-page":"86","volume":"39","author":"M Broxton","year":"2020","unstructured":"Broxton, M., Flynn, J., Overbeck, R., Erickson, D., Hedman, P., DuVall, M., Dourgarian, J., Busch, J., Whalen, M., & Debevec, P. (2020). Immersive light field video with a layered mesh representation, 39(4), 86\u201318615.","journal-title":"Immersive light field video with a layered mesh representation"},{"issue":"3","key":"1874_CR12","doi-asserted-by":"publisher","first-page":"411","DOI":"10.1109\/76.836285","volume":"10","author":"H Yamanoue","year":"2000","unstructured":"Yamanoue, H., Okui, M., & Yuyama, I. (2000). A study on the relationship between shooting conditions and cardboard effect of stereoscopic images. IEEE Transactions on Circuits and Systems for Video Technology, 10(3), 411\u2013416. https:\/\/doi.org\/10.1109\/76.836285","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"1874_CR13","doi-asserted-by":"crossref","unstructured":"Wilburn, B. S., Smulski, M., Lee, H. -H. K., & Horowitz, M. A. (2001). Light field video camera. In: Media Processors 2002, 4674, 29\u201336. International Society for Optics and Photonics.","DOI":"10.1117\/12.451074"},{"key":"1874_CR14","first-page":"77","volume":"2002","author":"JC Yang","year":"2002","unstructured":"Yang, J. C., Everett, M., Buehler, C., & McMillan, L. (2002). A real-time distributed light field camera. 
Rendering Techniques, 2002, 77\u201386.","journal-title":"Rendering Techniques"},{"key":"1874_CR15","doi-asserted-by":"crossref","unstructured":"Georgiev, T., Yu, Z., Lumsdaine, A., & Goma, S. (2013). Lytro camera technology: theory, algorithms, performance analysis. In: Multimedia Content and Mobile Devices, 8667, 86671. International Society for Optics and Photonics.","DOI":"10.1117\/12.2013581"},{"issue":"9","key":"1874_CR16","doi-asserted-by":"publisher","first-page":"3179","DOI":"10.1364\/BOE.6.003179","volume":"6","author":"X Lin","year":"2015","unstructured":"Lin, X., Wu, J., Zheng, G., & Dai, Q. (2015). Camera array based light field microscopy. Biomedical optics express, 6(9), 3179\u20133189.","journal-title":"Biomedical optics express"},{"key":"1874_CR17","unstructured":"Chelli, K., Lange, T., Thorsten, H., Solony, M., Smrz, P., Alain, M., Smolic, A., Trottnow, J., & Helzle, V. (2020). A versatile 5d light field capture array. In: NEM Summit 2020. New European Media Initiative."},{"issue":"2","key":"1874_CR18","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1023\/B:VISI.0000015916.91741.27","volume":"58","author":"Z Lin","year":"2004","unstructured":"Lin, Z., & Shum, H. -Y. (2004). A geometric analysis of light field rendering. International Journal of Computer Vision, 58(2), 121\u2013138. https:\/\/doi.org\/10.1023\/B:VISI.0000015916.91741.27","journal-title":"International Journal of Computer Vision"},{"key":"1874_CR19","doi-asserted-by":"crossref","unstructured":"Hamzah, R. A., & Ibrahim, H. (2016). Literature survey on stereo vision disparity map algorithms. Journal of Sensors 2016.","DOI":"10.1155\/2016\/8742920"},{"key":"1874_CR20","unstructured":"Alain, M., Aenchbacher, W., & Smolic, A. (2019). Interactive light field tilt-shift refocus with generalized shift-and-sum. ArXiv abs\/1910.04699"},{"key":"1874_CR21","unstructured":"Ng, R., Levoy, M., Br\u00e9dif, M., Duval, G., Horowitz, M., Hanrahan, P. (2005). 
Light field photography with a hand-held plenoptic camera. PhD thesis, Stanford University."},{"key":"1874_CR22","doi-asserted-by":"publisher","unstructured":"Sugita, K., Naemura, T., Harashima, H., & Takahashi, K. (2004). Focus measurement on programmable graphics hardware for all in-focus rendering from light fields. In: Virtual Reality Conference, IEEE, p. 255. IEEE Computer Society, Los Alamitos, CA, USA. https:\/\/doi.org\/10.1109\/VR.2004.1310096","DOI":"10.1109\/VR.2004.1310096"},{"key":"1874_CR23","doi-asserted-by":"publisher","unstructured":"Yang, R., Welch, G., & Bishop, G. (2002). Real-time consensus-based scene reconstruction using commodity graphics hardware+, 22, 225\u2013234. https:\/\/doi.org\/10.1109\/PCCGA.2002.1167864","DOI":"10.1109\/PCCGA.2002.1167864"},{"key":"1874_CR24","doi-asserted-by":"crossref","unstructured":"Gu, X., Fan, Z., Zhu, S., Dai, Z., Tan, F., & Tan, P. (2020). Cascade cost volume for high-resolution multi-view stereo and stereo matching. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2495\u20132504.","DOI":"10.1109\/CVPR42600.2020.00257"},{"key":"1874_CR25","doi-asserted-by":"publisher","unstructured":"Shi, L., Hassanieh, H., Davis, A., Katabi, D., & Durand, F. (2015). Light field reconstruction using sparsity in the continuous fourier domain.\u00a0ACM Transactions on Graphics, 34(1). https:\/\/doi.org\/10.1145\/2682631","DOI":"10.1145\/2682631"},{"issue":"1","key":"1874_CR26","doi-asserted-by":"publisher","first-page":"133","DOI":"10.1109\/TPAMI.2017.2653101","volume":"40","author":"S Vagharshakyan","year":"2018","unstructured":"Vagharshakyan, S., Bregovic, R., & Gotchev, A. (2018). Light field reconstruction using shearlet transform. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(1), 133\u2013147. 
https:\/\/doi.org\/10.1109\/TPAMI.2017.2653101","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"1874_CR27","doi-asserted-by":"crossref","unstructured":"Hirschmuller, H. (2005). Accurate and efficient stereo processing by semi-global matching and mutual information. In: 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR\u201905),\u00a0 2, 807\u2013814. IEEE.","DOI":"10.1109\/CVPR.2005.56"},{"key":"1874_CR28","doi-asserted-by":"crossref","unstructured":"Anisimov, Y., Wasenm\u00fcller, O., & Stricker, D. (2019). Rapid light field depth estimation with semi-global matching. 2019 IEEE 15th International Conference on Intelligent Computer Communication and Processing (ICCP), 109\u2013116.","DOI":"10.1109\/ICCP48234.2019.8959680"},{"key":"1874_CR29","doi-asserted-by":"publisher","unstructured":"Kolmogorov, V., & Zabih, R. (2001).\u00a0Multi-camera scene reconstruction via graph cuts, 2352.\u00a0https:\/\/doi.org\/10.1007\/3-540-47977-5_6","DOI":"10.1007\/3-540-47977-5_6"},{"key":"1874_CR30","doi-asserted-by":"publisher","first-page":"102878","DOI":"10.1016\/j.jvcir.2020.102878","volume":"72","author":"Y Wu","year":"2020","unstructured":"Wu, Y., Wang, Y., Liang, J., Bajic, I. V., & Wang, A. (2020). Light field all-in-focus image fusion based on spatially-guided angular information. Journal of Visual Communication and Image Representation, 72, 102878. https:\/\/doi.org\/10.1016\/j.jvcir.2020.102878","journal-title":"Journal of Visual Communication and Image Representation"},{"key":"1874_CR31","doi-asserted-by":"publisher","unstructured":"Sun, D., Roth, S., & Black, M. J. (2010). Secrets of optical flow estimation and their principles. In: 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pp. 2432\u20132439. 
https:\/\/doi.org\/10.1109\/CVPR.2010.5539939","DOI":"10.1109\/CVPR.2010.5539939"},{"key":"1874_CR32","doi-asserted-by":"publisher","unstructured":"Jiang, X., Pendu, M. L., & Guillemot, C. (2018). Depth estimation with occlusion handling from a sparse set of light field views. In: 2018 25th IEEE International Conference on Image Processing (ICIP), pp. 634\u2013638. https:\/\/doi.org\/10.1109\/ICIP.2018.8451466","DOI":"10.1109\/ICIP.2018.8451466"},{"key":"1874_CR33","unstructured":"Chen, Y., Alain, M., & Smolic, A. (2017). Fast and accurate optical flow based depth map estimation from light fields. In: Irish Machine Vision and Image Processing Conference (IMVIP)."},{"key":"1874_CR34","doi-asserted-by":"crossref","unstructured":"Lin, H., Chen, C., Kang, S. B., & Yu, J. (2015). Depth recovery from light field using focal stack symmetry. In: 2015 IEEE International Conference on Computer Vision (ICCV), pp. 3451\u20133459.","DOI":"10.1109\/ICCV.2015.394"},{"key":"1874_CR35","doi-asserted-by":"crossref","unstructured":"Tao, M. W., Hadap, S., Malik, J., & Ramamoorthi, R.\u00a0(2013). Depth from combining defocus and correspondence using light-field cameras. In: 2013 IEEE International Conference on Computer Vision, pp. 673\u2013680.","DOI":"10.1109\/ICCV.2013.89"},{"key":"1874_CR36","doi-asserted-by":"crossref","unstructured":"Neri, A., Carli, M., & Battisti, F. (2015). A multi-resolution approach to depth field estimation in dense image arrays. In: 2015 IEEE International Conference on Image Processing (ICIP), pp. 3358\u20133362.","DOI":"10.1109\/ICIP.2015.7351426"},{"key":"1874_CR37","doi-asserted-by":"publisher","unstructured":"Hosni, A., Bleyer, M., Rhemann, C., Gelautz, M., & Rother, C. (2011). Real-time local stereo matching using guided image filtering. In: 2011 IEEE International Conference on Multimedia and Expo, pp. 1\u20136. 
https:\/\/doi.org\/10.1109\/ICME.2011.6012131","DOI":"10.1109\/ICME.2011.6012131"},{"key":"1874_CR38","doi-asserted-by":"publisher","unstructured":"Penner, E., & Zhang, L. (2017). Soft 3d reconstruction for view synthesis.\u00a0ACM Transactions on Graphics, 36(6). https:\/\/doi.org\/10.1145\/3130800.3130855","DOI":"10.1145\/3130800.3130855"},{"key":"1874_CR39","unstructured":"Eigen, D., Puhrsch, C., & Fergus, R. (2014). Depth map prediction from a single image using a multi-scale deep network. In: Proceedings of the 27th International Conference on Neural Information Processing Systems - Volume 2. NIPS\u201914, pp. 2366\u20132374. MIT Press, Cambridge, MA, USA."},{"key":"1874_CR40","doi-asserted-by":"publisher","unstructured":"Peng, J., Xiong, Z., Liu, D., & Chen, X. (2018). Unsupervised depth estimation from light field using a convolutional neural network. In: 2018 International Conference on 3D Vision (3DV), pp. 295\u2013303. https:\/\/doi.org\/10.1109\/3DV.2018.00042","DOI":"10.1109\/3DV.2018.00042"},{"key":"1874_CR41","doi-asserted-by":"publisher","unstructured":"Eslami, S. M. A., JimenezRezende, D., Besse, F., Viola, F., Morcos, A. ., Garnelo, M., Ruderman, A., Rusu, A. A., Danihelka, I., Gregor, K., Reichert, D. P., Buesing, L., Weber, T., Vinyals, O., Rosenbaum, D., Rabinowitz, N., King, H., Hillier, C., Botvinick, M., Wierstra, D., Kavukcuoglu, K., & Hassabis, D. (2018). Neural scene representation and rendering. Science, 360(6394), 1204\u20131210. https:\/\/doi.org\/10.1126\/science.aar6170","DOI":"10.1126\/science.aar6170"},{"key":"1874_CR42","doi-asserted-by":"publisher","unstructured":"Han, X., Laga, H., & Bennamoun, M. (2019). Image-based 3d object reconstruction: State-of-the-art and trends in the deep learning era. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 1\u20131.\u00a0https:\/\/doi.org\/10.1109\/tpami.2019.2954885.","DOI":"10.1109\/tpami.2019.2954885"},{"issue":"7","key":"1874_CR43","doi-asserted-by":"publisher","first-page":"425","DOI":"10.1111\/cgf.13849","volume":"38","author":"L Ni","year":"2019","unstructured":"Ni, L., Jiang, H., Cai, J., Zheng, J., Li, H., & Liu, X. (2019). Unsupervised Dense Light Field Reconstruction with Occlusion Awareness. Computer Graphics Forum, 38(7), 425\u2013436. https:\/\/doi.org\/10.1111\/cgf.13849","journal-title":"Computer Graphics Forum"},{"key":"1874_CR44","doi-asserted-by":"crossref","unstructured":"Mildenhall, B., Srinivasan, P. P., Tancik, M., Barron, J. T., Ramamoorthi, R., & Ng, R. (2020). Nerf: Representing scenes as neural radiance fields for view synthesis. In: ECCV.","DOI":"10.1007\/978-3-030-58452-8_24"},{"issue":"3","key":"1874_CR45","doi-asserted-by":"publisher","first-page":"1319","DOI":"10.1007\/s10044-021-00956-2","volume":"24","author":"J Navarro","year":"2021","unstructured":"Navarro, J., & Sabater, N. (2021). Learning occlusion-aware view synthesis for light fields. Pattern Analysis and Applications, 24(3), 1319\u20131334. https:\/\/doi.org\/10.1007\/s10044-021-00956-2","journal-title":"Pattern Analysis and Applications"},{"key":"1874_CR46","doi-asserted-by":"crossref","unstructured":"Mildenhall, B., Srinivasan, P. P., Ortiz-Cayon, R., Kalantari, N. K., Ramamoorthi, R., Ng, R., & Kar, A. (2019).\u00a0Local Light Field Fusion: Practical View Synthesis with Prescriptive Sampling Guidelines.","DOI":"10.1145\/3306346.3322980"},{"key":"1874_CR47","doi-asserted-by":"publisher","unstructured":"Jiang, H., Sun, D., Jampani, V., Yang, M. -H., Learned-Miller, E., & Kautz, J. (2017). Super slomo: High quality estimation of multiple intermediate frames for video interpolation. CVPR 2018. 
https:\/\/doi.org\/10.48550\/ARXIV.1712.00080","DOI":"10.48550\/ARXIV.1712.00080"},{"issue":"4","key":"1874_CR48","doi-asserted-by":"publisher","first-page":"697","DOI":"10.1109\/TVCG.2007.1019","volume":"13","author":"H Wang","year":"2007","unstructured":"Wang, H., Sun, M., & Yang, R. (2007). Space-time light field rendering. IEEE Transactions on Visualization and Computer Graphics, 13(4), 697\u2013710.","journal-title":"IEEE Transactions on Visualization and Computer Graphics"},{"issue":"4","key":"1874_CR49","first-page":"1","volume":"36","author":"T-C Wang","year":"2017","unstructured":"Wang, T. -C., Zhu, J. -Y., Kalantari, N. K., Efros, A. A., & Ramamoorthi, R. (2017). Light field video capture using a learning-based hybrid imaging system. ACM Transactions on Graphics (TOG), 36(4), 1\u201313.","journal-title":"ACM Transactions on Graphics (TOG)"},{"key":"1874_CR50","doi-asserted-by":"crossref","unstructured":"Sabater, N., Boisson, G., Vandame, B., Kerbiriou, P., Babon, F., Hog, M., Gendrot, R., Langlois, T., Bureller, O., Schubert, A., et al. (2017). Dataset and pipeline for multi-view light-field video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 30\u201340.","DOI":"10.1109\/CVPRW.2017.221"},{"key":"1874_CR51","doi-asserted-by":"publisher","DOI":"10.1111\/cgf.13037","author":"L D\u0105ba\u0142a","year":"2016","unstructured":"D\u0105ba\u0142a, L., Ziegler, M., Didyk, P., Zilly, F., Keinert, J., Myszkowski, K., Seidel, H.-P., Rokita, P., & Ritschel, T. (2016). Efficient Multi-image Correspondences for On-line Light Field Video Processing. Computer Graphics Forum. https:\/\/doi.org\/10.1111\/cgf.13037","journal-title":"Computer Graphics Forum"},{"key":"1874_CR52","doi-asserted-by":"publisher","unstructured":"Salvador, G., Chau, J., Quesada, J., & Carranza, C. (2018). Efficient gpu-based implementation of the median filter based on a multi-pixel-per-thread framework, pp. 121\u2013124. 
https:\/\/doi.org\/10.1109\/SSIAI.2018.8470318","DOI":"10.1109\/SSIAI.2018.8470318"},{"issue":"1","key":"1874_CR53","doi-asserted-by":"publisher","first-page":"7","DOI":"10.1023\/A:1014573219977","volume":"47","author":"D Scharstein","year":"2002","unstructured":"Scharstein, D., & Szeliski, R. (2002). A taxonomy and evaluation of dense two-frame stereo correspondence algorithms. International journal of computer vision, 47(1), 7\u201342.","journal-title":"International journal of computer vision"},{"key":"1874_CR54","unstructured":"Kawase, M. (2003). Frame buffer postprocessing effects in double-steal (wrechless). In: Game Developers Conference 2003, 3."},{"key":"1874_CR55","unstructured":"Vaish, V., & Adams, A. (2008). The (new) stanford light field archive. Computer Graphics Laboratory, Stanford University, 6(7)."},{"key":"1874_CR56","unstructured":"Rerabek, M., & Ebrahimi, T. (2016). New light field image dataset. In: 8th International Conference on Quality of Multimedia Experience (QoMEX)."},{"key":"1874_CR57","doi-asserted-by":"crossref","unstructured":"Reda, F., Kontkanen, J., Tabellion, E., Sun, D., Pantofaru, C., & Curless, B. (2022). Film: Frame interpolation for large motion. ECCV 2022.","DOI":"10.1007\/978-3-031-20071-7_15"},{"key":"1874_CR58","doi-asserted-by":"publisher","first-page":"25","DOI":"10.1007\/978-3-540-24673-2_3","volume":"3024","author":"T Brox","year":"2004","unstructured":"Brox, T., Bruhn, A., Papenberg, N., & Weickert, J. (2004). High accuracy optical flow estimation based on a theory for warping, 3024, 25\u201336. https:\/\/doi.org\/10.1007\/978-3-540-24673-2_3","journal-title":"High accuracy optical flow estimation based on a theory for warping"},{"issue":"4","key":"1874_CR59","doi-asserted-by":"publisher","first-page":"102","DOI":"10.1145\/3528223.3530127","volume":"41","author":"T M\u00fcller","year":"2022","unstructured":"M\u00fcller, T., Evans, A., Schied, C., & Keller, A. (2022). 
Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4), 102\u2013110215. https:\/\/doi.org\/10.1145\/3528223.3530127","journal-title":"ACM Trans. Graph."},{"key":"1874_CR60","doi-asserted-by":"crossref","unstructured":"Reda, F., Kontkanen, J., Tabellion, E., Sun, D., Pantofaru, C., & Curless, B. (2022). Tensorflow 2 Implementation of \"FILM: Frame Interpolation for Large Motion\". GitHub.","DOI":"10.1007\/978-3-031-20071-7_15"},{"key":"1874_CR61","doi-asserted-by":"crossref","unstructured":"Choi, M., Choi, J., Baik, S., Kim, T. H., & Lee, K. M. (2020). Scene-adaptive video frame interpolation via meta-learning. In: CVPR.","DOI":"10.1109\/CVPR42600.2020.00946"},{"key":"1874_CR62","doi-asserted-by":"publisher","unstructured":"Ba\u0159ina, D., Chlubna, T., \u0160olony, M., Dlabaja, D., & Zem\u010d\u00edk, P. (2019). Evaluation of 4d light field compression methods. In: International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG), Part I. Computer Science Research Notes (CSRN), vol. 2901, pp. 55\u201361. Union Agency. 
https:\/\/doi.org\/10.24132\/CSRN.2019.2901.1.7","DOI":"10.24132\/CSRN.2019.2901.1.7"}],"container-title":["Journal of Signal Processing Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11265-023-01874-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11265-023-01874-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11265-023-01874-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,20]],"date-time":"2023-12-20T11:17:04Z","timestamp":1703071024000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11265-023-01874-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,5,23]]},"references-count":62,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2023,6]]}},"alternative-id":["1874"],"URL":"https:\/\/doi.org\/10.1007\/s11265-023-01874-8","relation":{},"ISSN":["1939-8018","1939-8115"],"issn-type":[{"value":"1939-8018","type":"print"},{"value":"1939-8115","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,5,23]]},"assertion":[{"value":"26 September 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 April 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 May 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 May 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 December 
2023","order":5,"name":"change_date","label":"Change Date","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"Update","order":6,"name":"change_type","label":"Change Type","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"The original version of this paper was updated to correct the Code Availability link.","order":7,"name":"change_details","label":"Change Details","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interest"}}]}}