{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,13]],"date-time":"2025-11-13T18:29:36Z","timestamp":1763058576259,"version":"3.37.3"},"reference-count":66,"publisher":"Springer Science and Business Media LLC","issue":"9-11","license":[{"start":{"date-parts":[[2021,6,25]],"date-time":"2021-06-25T00:00:00Z","timestamp":1624579200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,6,25]],"date-time":"2021-06-25T00:00:00Z","timestamp":1624579200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"name":"the Key Technological Innovation Projects of Hubei Province","award":["2018AAA062"],"award-info":[{"award-number":["2018AAA062"]}]},{"name":"Wuhan University-Huawei GeoInformatices Innovation Lab"},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61972298"],"award-info":[{"award-number":["61972298"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Science and Technology Cooperation Project of The Xinjiang Production and Construction Corps","award":["2019BC008"],"award-info":[{"award-number":["2019BC008"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2021,9]]},"DOI":"10.1007\/s00371-021-02206-2","type":"journal-article","created":{"date-parts":[[2021,6,25]],"date-time":"2021-06-25T14:02:46Z","timestamp":1624629766000},"page":"2567-2580","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":14,"title":["Self-supervised monocular depth estimation based on image texture detail enhancement"],"prefix":"10.1007","volume":"37","author":[{"given":"Yuanzhen","family":"Li","sequence":"first","affiliation":[]},{"given":"Fei","family":"Luo","sequence":"additional","affiliation":[]},{"given":"Wenjie","family":"Li","sequence":"additional","affiliation":[]},{"given":"Shenjie","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Huan-huan","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Chunxia","family":"Xiao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,6,25]]},"reference":[{"issue":"2","key":"2206_CR1","doi-asserted-by":"publisher","first-page":"88","DOI":"10.1016\/j.patrec.2008.04.005","volume":"30","author":"GJ Brostow","year":"2009","unstructured":"Brostow, G.J., Fauqueur, J., Cipolla, R.: Semantic object classes in video: A high-definition ground truth database. Patt. Recognit. Lett. 30(2), 88\u201397 (2009)","journal-title":"Patt. Recognit. Lett."},{"issue":"4","key":"2206_CR2","doi-asserted-by":"publisher","first-page":"532","DOI":"10.1109\/TCOM.1983.1095851","volume":"31","author":"P Burt","year":"1983","unstructured":"Burt, P., Adelson, E.: The laplacian pyramid as a compact image code. IEEE Trans. Commun. 31(4), 532\u2013540 (1983)","journal-title":"IEEE Trans. Commun."},{"key":"2206_CR3","doi-asserted-by":"crossref","unstructured":"Casser, V., Pirk, S., Mahjourian, R., Angelova, A.: Depth prediction without the sensors: Leveraging structure for unsupervised learning from monocular videos. In: AAAI, pp. 8001\u20138008 (2019)","DOI":"10.1609\/aaai.v33i01.33018001"},{"key":"2206_CR4","doi-asserted-by":"crossref","unstructured":"Chen, P.Y., Liu, A.H., Liu, Y.C., Wang, Y.C.F.: Towards scene understanding: Unsupervised monocular depth estimation with semantic-aware representation. In: CVPR, pp. 2619\u20132627 (2019)","DOI":"10.1109\/CVPR.2019.00273"},{"key":"2206_CR5","doi-asserted-by":"crossref","unstructured":"Cordts, M., Omran, M., Ramos, S., Rehfeld, T., Enzweiler, M., Benenson, R., Franke, U., Roth, S., Schiele, B.: The cityscapes dataset for semantic urban scene understanding. In: CVPR, pp. 3213\u20133223 (2016)","DOI":"10.1109\/CVPR.2016.350"},{"issue":"4","key":"2206_CR6","doi-asserted-by":"publisher","first-page":"343","DOI":"10.1109\/TMI.2002.1000258","volume":"21","author":"S Dippel","year":"2002","unstructured":"Dippel, S., Stahl, M., Wiemker, R., Blaffert, T.: Multiscale contrast enhancement for radiographies: Laplacian pyramid versus fast wavelet transform. IEEE Trans. Med. Imaging 21(4), 343\u2013353 (2002)","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"12","key":"2206_CR7","doi-asserted-by":"publisher","first-page":"2091","DOI":"10.1109\/TIP.2005.859376","volume":"14","author":"M Do","year":"2005","unstructured":"Do, M., Vetterli, M.: The Contourlet Transform: An Efficient Directional Multiresolution Image Representation. IEEE Trans. Image Process. 14(12), 2091\u20132106 (2005)","journal-title":"IEEE Trans. Image Process."},{"key":"2206_CR8","doi-asserted-by":"crossref","unstructured":"Eigen, D., Fergus, R.: Predicting depth, surface normals and semantic labels with a common multi-scale convolutional architecture. In: ICCV, pp. 2650\u20132658 (2015)","DOI":"10.1109\/ICCV.2015.304"},{"key":"2206_CR9","unstructured":"Eigen, D., Puhrsch, C., Fergus, R.: Depth map prediction from a single image using a multi-scale deep network. In: NIPS (2014)"},{"issue":"10\u201312","key":"2206_CR10","doi-asserted-by":"publisher","first-page":"2175","DOI":"10.1007\/s00371-020-01916-3","volume":"36","author":"X Fan","year":"2020","unstructured":"Fan, X., Wu, W., Zhang, L., Yan, Q., Fu, G., Chen, Z., Long, C., Xiao, C.: Shading-aware shadow detection and removal from a single image. Visual Comput. 36(10\u201312), 2175\u20132188 (2020)","journal-title":"Visual Comput."},{"key":"2206_CR11","doi-asserted-by":"crossref","unstructured":"Fattal, R., Agrawala, M., Rusinkiewicz, S.: Multiscale shape and detail enhancement from multi-light image collections. ACM Transactions on Graphics 26(3),(2007)","DOI":"10.1145\/1276377.1276441"},{"key":"2206_CR12","doi-asserted-by":"crossref","unstructured":"Flynn, J., Neulander, I., Philbin, J., Snavely, N.: Deepstereo: Learning to predict new views from the world\u2019s imagery. In: CVPR, pp. 5515\u20135524 (2016)","DOI":"10.1109\/CVPR.2016.595"},{"key":"2206_CR13","doi-asserted-by":"crossref","unstructured":"Fu, H., Gong, M., Wang, C., Batmanghelich, K., Tao, D.: Deep ordinal regression network for monocular depth estimation. In: CVPR, pp. 2002\u20132011 (2018)","DOI":"10.1109\/CVPR.2018.00214"},{"issue":"10\u201312","key":"2206_CR14","doi-asserted-by":"publisher","first-page":"2215","DOI":"10.1007\/s00371-020-01899-1","volume":"36","author":"Y Fu","year":"2020","unstructured":"Fu, Y., Yan, Q., Liao, J., Chow, A.L.H., Xiao, C.: Real-time dense 3D reconstruction and camera tracking via embedded planes representation. Visual Comput. 36(10\u201312), 2215\u20132226 (2020)","journal-title":"Visual Comput."},{"key":"2206_CR15","doi-asserted-by":"crossref","unstructured":"Fu, Y., Yan, Q., Liao, J., Xiao, C.: Joint texture and geometry optimization for rgb-d reconstruction. In: CVPR, pp. 5949\u20135958 (2020)","DOI":"10.1109\/CVPR42600.2020.00599"},{"key":"2206_CR16","doi-asserted-by":"crossref","unstructured":"Garg, R., VijayKumar, B.G., Carneiro, G., Reid, I.: Unsupervised cnn for single view depth estimation: Geometry to the rescue. In: ECCV, pp. 740\u2013756 (2016)","DOI":"10.1007\/978-3-319-46484-8_45"},{"issue":"11","key":"2206_CR17","first-page":"130","volume":"3","author":"V Garg","year":"2012","unstructured":"Garg, V., Singh, K.: An improved grunwald-letnikov fractional differential mask for image texture enhancement. Int. J. Adv. Comput. Sci. Appl. 3(11), 130\u2013135 (2012)","journal-title":"Int. J. Adv. Comput. Sci. Appl."},{"key":"2206_CR18","doi-asserted-by":"crossref","unstructured":"Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: CVPR, pp. 3354\u20133361 (2012)","DOI":"10.1109\/CVPR.2012.6248074"},{"key":"2206_CR19","doi-asserted-by":"crossref","unstructured":"Godard, C., Mac\u00a0Aodha, O., Brostow, G.J.: Unsupervised monocular depth estimation with left-right consistency. In: CVPR, pp. 6602\u20136611 (2017)","DOI":"10.1109\/CVPR.2017.699"},{"key":"2206_CR20","doi-asserted-by":"crossref","unstructured":"Godard, C., Mac\u00a0Aodha, O., Firman, M., Brostow, G.: Digging into self-supervised monocular depth estimation. In: ICCV, pp. 3827\u20133837 (2019)","DOI":"10.1109\/ICCV.2019.00393"},{"key":"2206_CR21","doi-asserted-by":"crossref","unstructured":"Guizilini, V., Ambrus, R., Pillai, S., Raventos, A., Gaidon, A.: 3d packing for self-supervised monocular depth estimation. In: CVPR, pp. 2482\u20132491 (2020)","DOI":"10.1109\/CVPR42600.2020.00256"},{"key":"2206_CR22","doi-asserted-by":"crossref","unstructured":"Guo, X., Li, H., Yi, S., Ren, J., Wang, X.: Learning monocular depth by distilling cross-domain stereo networks. In: ECCV, pp. 506\u2013523 (2018)","DOI":"10.1007\/978-3-030-01252-6_30"},{"key":"2206_CR23","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"2","key":"2206_CR24","doi-asserted-by":"publisher","first-page":"328","DOI":"10.1109\/TPAMI.2007.1166","volume":"30","author":"H Hirschmueller","year":"2008","unstructured":"Hirschmueller, H.: Stereo processing by semiglobal matching and mutual information. IEEE Trans. Patt. Anal. Mach. Intell. 30(2), 328\u2013341 (2008)","journal-title":"IEEE Trans. Patt. Anal. Mach. Intell."},{"key":"2206_CR25","doi-asserted-by":"crossref","unstructured":"Johnston, A., Carneiro, G.: Self-supervised monocular trained depth estimation using self-attention and discrete disparity volume. In: CVPR, pp. 4755\u20134764 (2020)","DOI":"10.1109\/CVPR42600.2020.00481"},{"key":"2206_CR26","unstructured":"Karen, S., Andrew, Z.: Very deep convolutional networks for large-scale image. In: ICLR (2015)"},{"issue":"11","key":"2206_CR27","doi-asserted-by":"publisher","first-page":"2144","DOI":"10.1109\/TPAMI.2014.2316835","volume":"36","author":"K Karsch","year":"2014","unstructured":"Karsch, K., Liu, C., Kang, S.B.: Depthtransfer: Depth extraction from video using non-parametric sampling. IEEE Trans. Patt. Anal. Mach. Intell. 36(11), 2144\u20132158 (2014)","journal-title":"IEEE Trans. Patt. Anal. Mach. Intell."},{"key":"2206_CR28","doi-asserted-by":"crossref","unstructured":"Kendall, A., Martirosyan, H., Dasgupta, S., Henry, P., Kennedy, R., Bachrach, A., Bry, A.: End-to-end learning of geometry and context for deep stereo regression. In: ICCV, pp. 66\u201375 (2017)","DOI":"10.1109\/ICCV.2017.17"},{"key":"2206_CR29","doi-asserted-by":"crossref","unstructured":"Klingner, M., Term\u00f6hlen, J.A., Mikolajczyk, J., Fingscheidt, T.: Self-supervised monocular depth estimation: Solving the dynamic object problem by semantic guidance. In: ECCV, pp. 2619\u20132627 (2020)","DOI":"10.1007\/978-3-030-58565-5_35"},{"key":"2206_CR30","doi-asserted-by":"crossref","unstructured":"Klodt, M., Vedaldi, A.: Supervising the new with the old: Learning sfm from sfm. In: ECCV, pp. 713\u2013728 (2018)","DOI":"10.1007\/978-3-030-01249-6_43"},{"key":"2206_CR31","doi-asserted-by":"crossref","unstructured":"Kundu, J.N., Uppala, P.K., Pahuja, A., Babu, R.V.: Adadepth: Unsupervised content congruent adaptation for depth estimation. In: CVPR, pp. 2656\u20132665 (2018)","DOI":"10.1109\/CVPR.2018.00281"},{"key":"2206_CR32","doi-asserted-by":"crossref","unstructured":"Kuznietsov, Y., St\u00fcckle, J., Leibe, B.: Semi-supervised deep learning for monocular depth map prediction. In: CVPR, pp. 2215\u20132223 (2017)","DOI":"10.1109\/CVPR.2017.238"},{"key":"2206_CR33","doi-asserted-by":"crossref","unstructured":"Laina, I., Rupprecht, C., Belagiannis, V., Tombari, F., Navab, N.: Deeper depth prediction with fully convolutional residual networks. In: 3DV, pp. 239\u2013248 (2016)","DOI":"10.1109\/3DV.2016.32"},{"key":"2206_CR34","doi-asserted-by":"crossref","unstructured":"Li, R., Wang, S., Long, Z., Gu, D.: Undeepvo: Monocular visual odometry through unsupervised deep learning. In: ICRA, pp. 7286\u20137291 (2018)","DOI":"10.1109\/ICRA.2018.8461251"},{"key":"2206_CR35","doi-asserted-by":"crossref","unstructured":"Liao, J., Wei, M., Fu, Y., Yan, Q., Xiao, C.: Dense multiview stereo based on image texture enhancement. Computer Animation and Virtual Worlds 32(2),(2021)","DOI":"10.1002\/cav.1979"},{"key":"2206_CR36","doi-asserted-by":"crossref","unstructured":"Liu, C., Gu, J., Kim, K., Narasimhan, S.G., Kautz, J.: Neural rgb-d sensing: Depth and uncertainty from a video camera. In: CVPR, pp. 10,978\u201310,987 (2019)","DOI":"10.1109\/CVPR.2019.01124"},{"issue":"10","key":"2206_CR37","doi-asserted-by":"publisher","first-page":"2024","DOI":"10.1109\/TPAMI.2015.2505283","volume":"38","author":"F Liu","year":"2016","unstructured":"Liu, F., Shen, C., Lin, G., Reid, I.: Learning depth from single monocular images using deep convolutional neural fields. IEEE Trans. Patt. Anal. Mach. Intell. 38(10), 2024\u20132039 (2016)","journal-title":"IEEE Trans. Patt. Anal. Mach. Intell."},{"key":"2206_CR38","doi-asserted-by":"crossref","unstructured":"Liu, M., Salzmann, M., He, X.: Discrete-continuous depth estimation from a single image. In: CVPR, pp. 716\u2013723 (2014)","DOI":"10.1109\/CVPR.2014.97"},{"key":"2206_CR39","doi-asserted-by":"crossref","unstructured":"Liu, Z., Yeh, R.A., Tang, X., Liu, Y., Agarwala, A.: Video frame synthesis using deep voxel flow. In: ICCV, pp. 4473\u20134481 (2017)","DOI":"10.1109\/ICCV.2017.478"},{"issue":"10","key":"2206_CR40","doi-asserted-by":"publisher","first-page":"2624","DOI":"10.1109\/TPAMI.2019.2930258","volume":"42","author":"C Luo","year":"2020","unstructured":"Luo, C., Yang, Z., Wang, P., Wang, Y., Xu, W., Nevatia, R., Yuille, A.: Every pixel counts ++: Joint learning of geometry and motion with 3d holistic understanding. IEEE Trans. Patt. Anal. Mach. Intell. 42(10), 2624\u20132641 (2020)","journal-title":"IEEE Trans. Patt. Anal. Mach. Intell."},{"key":"2206_CR41","doi-asserted-by":"crossref","unstructured":"Luo, Y., Ren, J., Lin, M., Pang, J., Sun, W., Li, H., Lin, L.: Single view stereo matching. In: CVPR, pp. 155\u2013163 (2018)","DOI":"10.1109\/CVPR.2018.00024"},{"key":"2206_CR42","doi-asserted-by":"crossref","unstructured":"Mahjourian, R., Wicke, M., Angelova, A.: Unsupervised learning of depth and ego-motion from monocular video using 3d geometric constraints. In: CVPR, pp. 5667\u20135675 (2018)","DOI":"10.1109\/CVPR.2018.00594"},{"key":"2206_CR43","doi-asserted-by":"crossref","unstructured":"Mehta, I., Sakurikar, P., Narayanan, P.J.: Structured adversarial training for unsupervised monocular depth estimation. In: 3DV, pp. 314\u2013323 (2018)","DOI":"10.1109\/3DV.2018.00044"},{"issue":"6","key":"2206_CR44","doi-asserted-by":"publisher","first-page":"1842:1","DOI":"10.1145\/3355089.3356528","volume":"38","author":"S Niklaus","year":"2019","unstructured":"Niklaus, S., Mai, L., Yang, J., Liu, F.: 3d ken burns effect from a single image. ACM Trans. Graphics 38(6), 1842:1-1842:15 (2019)","journal-title":"ACM Trans. Graphics"},{"key":"2206_CR45","unstructured":"Paszke, A., Gross, S., Chintala, S., Chanan, G., Yang, E., DeVito, Z., Lin, Z., Desmaison, A., Antiga, L., Lerer, A.: Automatic differentiation in pytorch. In: NIPS (2017)"},{"key":"2206_CR46","doi-asserted-by":"crossref","unstructured":"Pillai, S., Ambrus, R., Gaidon, A.: Superdepth: Self-supervised, super-resolved monocular depth estimation. In: ICRA, pp. 9250\u20139256 (2019)","DOI":"10.1109\/ICRA.2019.8793621"},{"key":"2206_CR47","unstructured":"P.Kingma, D., Lei\u00a0Ba, J.: Adam: A method for stochastic optimization. In: ICLR (2015)"},{"key":"2206_CR48","doi-asserted-by":"crossref","unstructured":"Poggi, M., Tosi, F., Mattoccia, S.: Learning monocular depth estimation with unsupervised trinocular assumptions. In: 3DV, pp. 324\u2013333 (2018)","DOI":"10.1109\/3DV.2018.00045"},{"key":"2206_CR49","doi-asserted-by":"crossref","unstructured":"Ranjan, A., Jampani, V., Balles, L., Kim, K., Sun, D., Wulff, J., Black, M.J.: Competitive collaboration: Joint unsupervised learning of depth, camera motion, optical flow and motion segmentation. In: CVPR, pp. 12,232\u201312,241 (2019)","DOI":"10.1109\/CVPR.2019.01252"},{"key":"2206_CR50","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: Convolutional networks for biomedical image segmentation. In: MICCAI, pp. 234\u2013241 (2015)","DOI":"10.1007\/978-3-319-24574-4_28"},{"issue":"5","key":"2206_CR51","doi-asserted-by":"publisher","first-page":"824","DOI":"10.1109\/TPAMI.2008.132","volume":"31","author":"A Saxena","year":"2009","unstructured":"Saxena, A., Sun, M., Ng, A.Y.: Make3d: Learning 3d scene structure from a single still image. IEEE Trans. Patt. Anal. Mach. Intell. 31(5), 824\u2013840 (2009)","journal-title":"IEEE Trans. Patt. Anal. Mach. Intell."},{"key":"2206_CR52","doi-asserted-by":"crossref","unstructured":"Srinivasan, P.P., Wang, T., Sreelal, A., Ramamoorthi, R., Ng, R.: Learning to synthesize a 4d rgbd light field from a single image. In: CVPR, pp. 2262\u20132270 (2017)","DOI":"10.1109\/ICCV.2017.246"},{"key":"2206_CR53","doi-asserted-by":"crossref","unstructured":"Tosi, F., Aleotti, F., Poggi, M., Mattoccia, S.: Learning monocular depth estimation infusing traditional stereo knowledge. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.01003"},{"key":"2206_CR54","doi-asserted-by":"crossref","unstructured":"Wang, C., Miguel\u00a0Buenaposada, J., Zhu, R., Lucey, S.: Learning depth from monocular videos using direct methods. In: CVPR, pp. 2022\u20132030 (2018)","DOI":"10.1109\/CVPR.2018.00216"},{"key":"2206_CR55","doi-asserted-by":"crossref","unstructured":"Wang, X., Girshick, R., Gupta, A., He, K.: Non-local neural networks. In: CVPR, p. 7794\u20137803 (2018)","DOI":"10.1109\/CVPR.2018.00813"},{"key":"2206_CR56","doi-asserted-by":"crossref","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment : From error visibility to structural similarity. IEEE Transactions on Image Processing 13(4),(2004)","DOI":"10.1109\/TIP.2003.819861"},{"key":"2206_CR57","doi-asserted-by":"crossref","unstructured":"Watson, J., Firman, M., Brostow, G., Turmukhambetov, D.: Self-supervised monocular depth hints. In: ICCV, pp. 2162\u20132171 (2019)","DOI":"10.1109\/ICCV.2019.00225"},{"key":"2206_CR58","doi-asserted-by":"crossref","unstructured":"Xie, J., Girshick, R., Farhadi, A.: Deep3d: Fully automatic 2d-to-3d video conversion with deep convolutional neural networks. In: ECCV, pp. 842\u2013857 (2016)","DOI":"10.1007\/978-3-319-46493-0_51"},{"key":"2206_CR59","doi-asserted-by":"crossref","unstructured":"Yang, N., Wang, R., Stueckler, J., Cremers, D.: Deep virtual stereo odometry: Leveraging deep depth prediction for monocular direct sparse odometry. In: ECCV, pp. 835\u2013852 (2018)","DOI":"10.1007\/978-3-030-01237-3_50"},{"key":"2206_CR60","doi-asserted-by":"crossref","unstructured":"Yang, Z., Wang, P., Wang, Y., Xu, W., Nevatia, R.: Lego: Learning edge with geometry all at once by watching videos. In: CVPR, pp. 225\u2013234 (2018)","DOI":"10.1109\/CVPR.2018.00031"},{"key":"2206_CR61","doi-asserted-by":"crossref","unstructured":"Yang, Z., Wang, P., Xu, W., Zhao, L., Nevatia, R.: Unsupervised learning of geometry from videos with edge-aware depth-normal consistency. In: AAAI, pp. 7493\u20137500 (2018)","DOI":"10.1609\/aaai.v32i1.12257"},{"key":"2206_CR62","doi-asserted-by":"crossref","unstructured":"Yin, Z., Shi, J.: Geonet: Unsupervised learning of dense depth, optical flow and camera pose. In: CVPR, pp. 1983\u20131992 (2018)","DOI":"10.1109\/CVPR.2018.00212"},{"key":"2206_CR63","doi-asserted-by":"crossref","unstructured":"Zhou, T., Brown, M., Snavely, N., Lowe, D.G.: Unsupervised learning of depth and ego-motion from video. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.700"},{"key":"2206_CR64","doi-asserted-by":"crossref","unstructured":"Zhou, T., Tucker, R., Flynn, J., Fyffe, G., Snavely, N.: Stereo magnification: Learning view synthesis using multiplane images. ACM Transactions on Graphics 37(4),(2018)","DOI":"10.1145\/3197517.3201323"},{"key":"2206_CR65","doi-asserted-by":"crossref","unstructured":"Zhou, T., Tulsiani, S., Sun, W., Malik, J., Efros, A.A.: View synthesis by appearance flow. In: ECCV (2016)","DOI":"10.1007\/978-3-319-46493-0_18"},{"key":"2206_CR66","doi-asserted-by":"crossref","unstructured":"Zou, Y., Luo, Z., Huang, J.B.: Df-net: Unsupervised joint learning of depth and flow using cross-task consistency. In: ECCV, pp. 38\u201355 (2018)","DOI":"10.1007\/978-3-030-01228-1_3"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-021-02206-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-021-02206-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-021-02206-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T14:30:26Z","timestamp":1672583426000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-021-02206-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6,25]]},"references-count":66,"journal-issue":{"issue":"9-11","published-print":{"date-parts":[[2021,9]]}},"alternative-id":["2206"],"URL":"https:\/\/doi.org\/10.1007\/s00371-021-02206-2","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"type":"print","value":"0178-2789"},{"type":"electronic","value":"1432-2315"}],"subject":[],"published":{"date-parts":[[2021,6,25]]},"assertion":[{"value":"9 June 2021","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 June 2021","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}