{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T03:04:54Z","timestamp":1740107094787,"version":"3.37.3"},"reference-count":49,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2021,3,28]],"date-time":"2021-03-28T00:00:00Z","timestamp":1616889600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,3,28]],"date-time":"2021-03-28T00:00:00Z","timestamp":1616889600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2022,5]]},"DOI":"10.1007\/s00371-021-02107-4","type":"journal-article","created":{"date-parts":[[2021,3,28]],"date-time":"2021-03-28T16:02:16Z","timestamp":1616947336000},"page":"1815-1832","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Whole-pixel registration of non-rigid images using correspondences interpolation on sparse feature seeds"],"prefix":"10.1007","volume":"38","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8529-7960","authenticated-orcid":false,"given":"Kai","family":"He","sequence":"first","affiliation":[]},{"given":"Yan","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Zhiguo","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Dashuang","family":"Li","sequence":"additional","affiliation":[]},{"given":"Xitao","family":"Ma","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,3,28]]},"reference":[{"issue":"7","key":"2107_CR1","doi-asserted-by":"publisher","first-page":"1502","DOI":"10.1109\/TMI.2017.2668842","volume":"36","author":"JA Collins","year":"2017","unstructured":"Collins, J.A., Weis, 
J.A., Heiselman, J.S., et al.: Improving registration robustness for image-guided liver surgery in a novel human-to-phantom data framework. IEEE Trans. Med. Imaging 36(7), 1502\u20131510 (2017)","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"2","key":"2107_CR2","first-page":"766","volume":"23","author":"L Gong","year":"2018","unstructured":"Gong, L., Zhang, C., Duan, L., et al.: Nonrigid image registration using spatially region-weighted correlation ratio and GPU-acceleration. IEEE J Biomed Health. 23(2), 766\u2013778 (2018)","journal-title":"IEEE J Biomed Health."},{"issue":"2","key":"2107_CR3","doi-asserted-by":"publisher","first-page":"91","DOI":"10.1023\/B:VISI.0000029664.99615.94","volume":"60","author":"DG Lowe","year":"2004","unstructured":"Lowe, D.G.: Distinctive image features from scale-invariant keypoints. Int. J. Comput. Vis. 60(2), 91\u2013110 (2004)","journal-title":"Int. J. Comput. Vis."},{"key":"2107_CR4","doi-asserted-by":"crossref","unstructured":"Bay, H., Tuytelaars, H., Van Gool, L.: SURF: speeded up robust features. In: IEEE International Conference on Computer Vision, pp. 404\u2013417, Graz, Austria (2006)","DOI":"10.1007\/11744023_32"},{"key":"2107_CR5","doi-asserted-by":"crossref","unstructured":"Alcantarilla, P.F., Bartoli, A., Davison, A.J.: KAZE features. In: European Conference on Computer Vision, pp. 214\u2013227, Florence, Italy (2012)","DOI":"10.1007\/978-3-642-33783-3_16"},{"key":"2107_CR6","doi-asserted-by":"crossref","unstructured":"Alcantarilla, P.F., Nuevo, J., Bartoli, A.: Fast explicit diffusion for accelerated features in non-linear scale spaces. In: British Machine Vision Conference, pp. 13.1\u201313.11, Bristol, England (2013)","DOI":"10.5244\/C.27.13"},{"key":"2107_CR7","unstructured":"Ke, Y., Sukthankar, R.: PCA-SIFT: a more distinctive representation for local image descriptors. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 
506\u2013513, Washington, DC, USA (2004)"},{"key":"2107_CR8","doi-asserted-by":"crossref","unstructured":"Rublee, E., Rabaud, V., Konolige, K., et al.: ORB: an efficient alternative to SIFT or SURF. In: IEEE International Conference on Computer Vision, pp. 2564\u20132571, Barcelona, Spain (2011)","DOI":"10.1109\/ICCV.2011.6126544"},{"issue":"5","key":"2107_CR9","doi-asserted-by":"publisher","first-page":"1147","DOI":"10.1109\/TRO.2015.2463671","volume":"31","author":"R Mur-Artal","year":"2017","unstructured":"Mur-Artal, R., Montiel, J.M.M., Tardos, J.D.: ORB-SLAM: a versatile and accurate monocular SLAM system. IEEE Trans. Robot. 31(5), 1147\u20131163 (2017)","journal-title":"IEEE Trans. Robot."},{"issue":"2","key":"2107_CR10","doi-asserted-by":"publisher","first-page":"438","DOI":"10.1137\/080732730","volume":"2","author":"JM Morel","year":"2009","unstructured":"Morel, J.M., Yu, G.: ASIFT: a new framework for fully affine invariant image comparison. SIAM J. Imaging Sci. 2(2), 438\u2013469 (2009)","journal-title":"SIAM J. Imaging Sci."},{"issue":"11","key":"2107_CR11","doi-asserted-by":"publisher","first-page":"3088","DOI":"10.1016\/j.sigpro.2013.04.008","volume":"93","author":"GR Cai","year":"2013","unstructured":"Cai, G.R., Jodoin, P.M., Li, S.Z., et al.: Perspective-sift: an efficient tool for low-altitude remote sensing image registration. Signal Process. 93(11), 3088\u20133110 (2013)","journal-title":"Signal Process."},{"key":"2107_CR12","doi-asserted-by":"publisher","DOI":"10.1117\/1.JRS.12.025002","author":"C Wang","year":"2018","unstructured":"Wang, C.: Unmanned aerial vehicle oblique image registration using an asift-based matching method. J. Appl. Remote Sens. (2018). https:\/\/doi.org\/10.1117\/1.JRS.12.025002","journal-title":"J. Appl. 
Remote Sens."},{"issue":"5","key":"2107_CR13","doi-asserted-by":"publisher","first-page":"667","DOI":"10.1007\/s00371-018-1502-1","volume":"35","author":"Y Liu","year":"2019","unstructured":"Liu, Y., Yu, D., Chen, X., et al.: TOP-SIFT: the selected SIFT descriptor based on dictionary learning. Vis. Comput. 35(5), 667\u2013677 (2019)","journal-title":"Vis. Comput."},{"key":"2107_CR14","doi-asserted-by":"crossref","unstructured":"Brox, T., Bruhn, A., Papenberg, N., et al.: High accuracy optical flow estimation based on a theory for warping. In: European Conference on Computer Vision, pp. 25\u201336, Prague, Czech Republic (2004)","DOI":"10.1007\/978-3-540-24673-2_3"},{"key":"2107_CR15","doi-asserted-by":"publisher","first-page":"167","DOI":"10.1016\/j.imavis.2016.06.004","volume":"52","author":"Y Hu","year":"2016","unstructured":"Hu, Y., Song, R., Li, Y., et al.: Highly accurate optical flow estimation on superpixel tree. Image Vis. Comput. 52, 167\u2013177 (2016)","journal-title":"Image Vis. Comput."},{"issue":"9","key":"2107_CR16","doi-asserted-by":"publisher","first-page":"1744","DOI":"10.1109\/TPAMI.2011.236","volume":"34","author":"L Xu","year":"2012","unstructured":"Xu, L., Jia, J., Matsushita, Y.: Motion detail preserving optical flow estimation. IEEE Trans. Pattern Anal. Mach. Intell. 34(9), 1744\u20131757 (2012)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"3","key":"2107_CR17","doi-asserted-by":"publisher","first-page":"500","DOI":"10.1109\/TPAMI.2010.143","volume":"33","author":"T Brox","year":"2011","unstructured":"Brox, T., Malik, J.: Large displacement optical flow: descriptor matching in variational motion estimation. IEEE Trans. Pattern Anal. Mach. Intell. 33(3), 500\u2013513 (2011)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"2107_CR18","doi-asserted-by":"crossref","unstructured":"Rak\u00eat, L.L., Roholm, L., Bruhn, A., et al.: Motion Compensated Frame Interpolation with a Symmetric Optical Flow Constraint. In: International Symposium on Visual Computing, pp. 447\u2013457, Berlin, Heidelberg (2012)","DOI":"10.1007\/978-3-642-33179-4_43"},{"key":"2107_CR19","doi-asserted-by":"crossref","unstructured":"Niklaus, S., Liu, F.: Context-aware Synthesis for Video Frame Interpolation. abs\/1803.10967 (2018)","DOI":"10.1109\/CVPR.2018.00183"},{"key":"2107_CR20","doi-asserted-by":"crossref","unstructured":"Weinzaepfel, P., Revaud, J., Harchaoui, Z., et al.: DeepFlow: large displacement optical flow with deep matching. In: IEEE International Conference on Computer Vision, pp. 1385\u20131392, Sydney, NSW, Australia (2013)","DOI":"10.1109\/ICCV.2013.175"},{"key":"2107_CR21","doi-asserted-by":"crossref","unstructured":"Kroeger, T., Timofte, R., Dai, D., et al.: Fast optical flow using dense inverse search. In: European Conference on Computer Vision, pp. 471\u2013488, Amsterdam, Netherlands (2016)","DOI":"10.1007\/978-3-319-46493-0_29"},{"key":"2107_CR22","doi-asserted-by":"crossref","unstructured":"Hu, Y., Song, R., Li, Y.: Efficient coarse-to-fine patch match for large displacement optical flow. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 5704\u20135712, Las Vegas, NV, USA (2016)","DOI":"10.1109\/CVPR.2016.615"},{"key":"2107_CR23","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2018.2885246","author":"J Chen","year":"2018","unstructured":"Chen, J., Cai, Z., Lai, J., et al.: Efficient segmentation-based patch match for large displacement optical flow estimation. IEEE Trans. Circuits Syst. Video Technol (2018). https:\/\/doi.org\/10.1109\/TCSVT.2018.2885246","journal-title":"IEEE Trans. Circuits Syst. 
Video Technol"},{"key":"2107_CR24","doi-asserted-by":"crossref","unstructured":"Xu, J., Ranftl, R., and Koltun, V.: Accurate optical flow via direct cost volume processing. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 5807\u20135815, Honolulu, HI, USA (2017)","DOI":"10.1109\/CVPR.2017.615"},{"key":"2107_CR25","doi-asserted-by":"crossref","unstructured":"Revaud, J., Weinzaepfel, P., Harchaoui, Z., et al.: EpicFlow: Edge-preserving interpolation of correspondences for optical flow. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 1164\u20131172, Boston, MA, USA (2015)","DOI":"10.1109\/CVPR.2015.7298720"},{"key":"2107_CR26","doi-asserted-by":"crossref","unstructured":"Geistert, J., Senst, T., and Sikora, T.: Robust local optical flow: Dense motion vector field interpolation. In: Picture Coding Symposium, pp. 1\u20135, Nuremberg, Germany (2016)","DOI":"10.1109\/PCS.2016.7906352"},{"key":"2107_CR27","doi-asserted-by":"crossref","unstructured":"Bailer, C., Taetz, B., and Stricker, D.: Flow fields: dense correspondence fields for highly accurate large displacement optical flow estimation. In: IEEE International Conference on Computer Vision, pp. 4015\u20134023, Santiago, Chile (2015)","DOI":"10.1109\/ICCV.2015.457"},{"key":"2107_CR28","doi-asserted-by":"crossref","unstructured":"Schuster, R., Bailer, C., Wasenm\u00fcller, O., et al.: FlowFields++: accurate optical flow correspondences meet robust interpolation. In: IEEE International Conference on Image Processing, pp. 1463\u20131467, Athens, Greece (2018)","DOI":"10.1109\/ICIP.2018.8451182"},{"key":"2107_CR29","doi-asserted-by":"crossref","unstructured":"Chen, Q., Koltun, V.: Full Flow: optical flow estimation by global optimization over regular grids. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 
4706\u20134714, Las Vegas, NV, USA (2016)","DOI":"10.1109\/CVPR.2016.509"},{"key":"2107_CR30","doi-asserted-by":"crossref","unstructured":"Hu, Y., Li, Y., Song, R.: Robust Interpolation of correspondences for large displacement optical flow. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 4791\u20134799, Honolulu, HI, USA (2017)","DOI":"10.1109\/CVPR.2017.509"},{"key":"2107_CR31","doi-asserted-by":"crossref","unstructured":"Wulff, J., Black, M.J.: Efficient sparse-to-dense optical flow estimation using a learned basis and layers. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 120\u2013130, Boston, MA, USA (2015)","DOI":"10.1109\/CVPR.2015.7298607"},{"key":"2107_CR32","doi-asserted-by":"crossref","unstructured":"Dosovitskiy, A., Fischer, P., Ilg, E., et al.: Flownet: learning optical flow with convolutional networks. In: IEEE International Conference on Computer Vision, pp. 2758\u20132766, Santiago, Chile (2015)","DOI":"10.1109\/ICCV.2015.316"},{"key":"2107_CR33","doi-asserted-by":"crossref","unstructured":"Ranjan, A., Black, M.J.: Optical flow estimation using a spatial pyramid network. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 2720\u20132729, Honolulu, HI, USA (2017)","DOI":"10.1109\/CVPR.2017.291"},{"key":"2107_CR34","doi-asserted-by":"crossref","unstructured":"Sun, D., Yang, X., Liu, M.Y., et al.: PWC-Net: CNNs for optical flow using pyramid, warping, and cost volume. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 8934\u20138943, Salt Lake City, UT, USA (2018)","DOI":"10.1109\/CVPR.2018.00931"},{"key":"2107_CR35","doi-asserted-by":"crossref","unstructured":"Hur, J., Roth, S.: Iterative residual refinement for joint optical flow and occlusion estimation. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 
5754\u20135763, Seattle, USA (2020)","DOI":"10.1109\/CVPR.2019.00590"},{"key":"2107_CR36","doi-asserted-by":"crossref","unstructured":"Hui, T.W., Tang, X., Loy, C.C.: LiteFlowNet: a lightweight convolutional neural network for optical flow estimation. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 8981\u20138989, Salt Lake City, UT, USA (2018)","DOI":"10.1109\/CVPR.2018.00936"},{"key":"2107_CR37","doi-asserted-by":"crossref","unstructured":"Teed, Z., Deng, J.: RAFT: recurrent all-pairs field transforms for optical flow. abs\/2003.12039 (2020)","DOI":"10.24963\/ijcai.2021\/662"},{"key":"2107_CR38","unstructured":"Hui, T.W., Tang, X., Loy, C.C.: A lightweight optical flow CNN-revisiting data fidelity and regularization. abs\/1903.07414 (2020)"},{"key":"2107_CR39","doi-asserted-by":"crossref","unstructured":"Melekhov, I., Tiulpin, A., Sattler, T., et al.: Dgc-net: dense geometric correspondence network. In: IEEE Winter Conference on Applications of Computer Vision, pp. 1034\u20131042, Hawaii, USA (2019)","DOI":"10.1109\/WACV.2019.00115"},{"key":"2107_CR40","doi-asserted-by":"crossref","unstructured":"Truong, P., Danelljan, M., Timofte, R.: GLU-Net: global-local universal network for dense flow and correspondences. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 6258\u20136268, Seattle, USA (2020)","DOI":"10.1109\/CVPR42600.2020.00629"},{"issue":"8","key":"2107_CR41","doi-asserted-by":"publisher","first-page":"1558","DOI":"10.1109\/TPAMI.2014.2377715","volume":"37","author":"P Doll\u00e1r","year":"2015","unstructured":"Doll\u00e1r, P., Zitnick, C.L.: Fast edge detection using structured forests. IEEE Trans. Pattern Anal. Mach. Intell. 37(8), 1558\u20131570 (2015)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"2107_CR42","doi-asserted-by":"publisher","first-page":"161825","DOI":"10.1109\/ACCESS.2020.3021356","volume":"8","author":"RJ Mstafa","year":"2020","unstructured":"Mstafa, R.J., Younis, Y.M., Hussein, H.I.: A new video steganography scheme based on Shi-Tomasi corner detector. IEEE Access 8, 161825\u2013161837 (2020)","journal-title":"IEEE Access"},{"key":"2107_CR43","doi-asserted-by":"crossref","unstructured":"Leutenegger, S., Chli, M., Siegwart, R.: BRISK: binary robust invariant scalable key points. In: IEEE International Conference on Computer Vision, pp. 2548\u20132555, Barcelona, Spain (2011)","DOI":"10.1109\/ICCV.2011.6126542"},{"key":"2107_CR44","doi-asserted-by":"crossref","unstructured":"Redding, N.J., Ohmer, J.F., Kelly J., et al.: Cross-matching via feature matching for camera handover with non-overlapping fields of view. In: Digital Image Computing: Techniques and Applications, pp. 343\u2013350, Canberra, Australia (2008)","DOI":"10.1109\/DICTA.2008.38"},{"key":"2107_CR45","doi-asserted-by":"crossref","unstructured":"Bian, J., Lin, W. Y., Matsushita, Y., et al.: Gms: Grid-based motion statistics for fast, ultra-robust feature correspondence. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 4181\u20134190, Honolulu, HI, USA (2017)","DOI":"10.1109\/CVPR.2017.302"},{"key":"2107_CR46","volume-title":"All of Statistics: A Concise Course in Statistical Inference","author":"L Wasserman","year":"2010","unstructured":"Wasserman, L.: All of Statistics: A Concise Course in Statistical Inference. Springer, Berlin (2010)"},{"key":"2107_CR47","volume-title":"Multiple View Geometry in Computer Vision","author":"BP Wrobel","year":"2004","unstructured":"Wrobel, B.P.: Multiple View Geometry in Computer Vision. 
Cambridge University, Cambridge (2004)"},{"issue":"12","key":"2107_CR48","doi-asserted-by":"publisher","first-page":"6046","DOI":"10.1109\/TIP.2017.2751142","volume":"26","author":"K He","year":"2017","unstructured":"He, K., Zhen, R., Yan, J., Ge, Y.: Single-image shadow removal using 3D intensity surface modeling. IEEE Trans. Image Process. 26(12), 6046\u20136060 (2017)","journal-title":"IEEE Trans. Image Process."},{"issue":"2","key":"2107_CR49","doi-asserted-by":"publisher","first-page":"115","DOI":"10.1007\/s11263-013-0644-x","volume":"106","author":"D Sun","year":"2014","unstructured":"Sun, D., Roth, S., Black, M.J.: A quantitative analysis of current practices in optical flow estimation and the principles behind them. Int. J. Comput. Vis. 106(2), 115\u2013137 (2014)","journal-title":"Int. J. Comput. Vis."}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-021-02107-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-021-02107-4\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-021-02107-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,13]],"date-time":"2022-04-13T13:11:19Z","timestamp":1649855479000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-021-02107-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,3,28]]},"references-count":49,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2022,5]]}},"alternative-id":["2107"],"URL":"https:\/\/doi.org\/10.1007\/s00371-021-02107-4","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"type":"print","value":"0178-27
89"},{"type":"electronic","value":"1432-2315"}],"subject":[],"published":{"date-parts":[[2021,3,28]]},"assertion":[{"value":"4 March 2021","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 March 2021","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Compliance with ethical standards"}},{"value":"We declare that we have no financial and personal relationships with other people or organizations that can inappropriately influence our work, there is no professional or other personal interest of any nature or kind in any product, service or company that could be construed as influencing the review of the manuscript.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}