{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,25]],"date-time":"2025-09-25T00:14:56Z","timestamp":1758759296991,"version":"3.44.0"},"reference-count":55,"publisher":"Springer Science and Business Media LLC","issue":"13","license":[{"start":{"date-parts":[[2025,8,7]],"date-time":"2025-08-07T00:00:00Z","timestamp":1754524800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,8,7]],"date-time":"2025-08-07T00:00:00Z","timestamp":1754524800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"the Natural Science Foundation of Fujian Province","award":["No. 2022J01190"],"award-info":[{"award-number":["No. 2022J01190"]}]},{"name":"Funding for the Media Content Security Collaborative Innovation Platform Project of Fuxiaquan National Independent Innovation Demonstration Zone","award":["No. 2023-P-003"],"award-info":[{"award-number":["No. 
2023-P-003"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1007\/s00371-025-04126-x","type":"journal-article","created":{"date-parts":[[2025,8,7]],"date-time":"2025-08-07T09:40:18Z","timestamp":1754559618000},"page":"11657-11673","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Enhancing defocus blur detection through dual recurrent complementary residual refinement"],"prefix":"10.1007","volume":"41","author":[{"given":"Longrui","family":"Li","sequence":"first","affiliation":[]},{"given":"Liqing","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Tianqiang","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Haifeng","family":"Luo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,8,7]]},"reference":[{"key":"4126_CR1","doi-asserted-by":"publisher","first-page":"829","DOI":"10.1109\/TCSVT.2020.2990623","volume":"313","author":"YQ Liu","year":"2021","unstructured":"Liu, Y.Q., Du, X., Shen, H.L., Chen, S.J.: Estimating generalized gaussian blur kernels for out-of-focus image deblurring. IEEE Trans. Circuits Syst. Video Technol. 313, 829\u2013843 (2021). https:\/\/doi.org\/10.1109\/TCSVT.2020.2990623","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"4126_CR2","doi-asserted-by":"publisher","first-page":"53","DOI":"10.1007\/s11263-010-0409-8","volume":"93","author":"C Zhou","year":"2011","unstructured":"Zhou, C., Lin, S., Nayar, S.K.: Coded aperture pairs for depth from defocus and defocus deblurring. Int. J. Comput. Vision 93, 53\u201372 (2011). https:\/\/doi.org\/10.1007\/s11263-010-0409-8","journal-title":"Int. J. Comput. 
Vision"},{"key":"4126_CR3","doi-asserted-by":"publisher","first-page":"170","DOI":"10.1016\/j.neucom.2012.01.017","volume":"86","author":"X Deng","year":"2012","unstructured":"Deng, X., Shen, Y., Song, M., Tao, D., Bu, J., Chen, C.: Video-based non-uniform object motion blur estimation and deblurring. Neurocomputing 86, 170\u2013178 (2012). https:\/\/doi.org\/10.1016\/j.neucom.2012.01.017","journal-title":"Neurocomputing"},{"key":"4126_CR4","doi-asserted-by":"publisher","first-page":"3693","DOI":"10.1007\/s00371-023-03059-7","volume":"405","author":"M Long","year":"2024","unstructured":"Long, M., Yu, X., Cong, S., Zoujian, W., Jiangbin, D., Jiayao, Z.: Face image deblurring with feature correction and fusion. Vis. Comput. 405, 3693\u20133707 (2024). https:\/\/doi.org\/10.1007\/s00371-023-03059-7","journal-title":"Vis. Comput."},{"key":"4126_CR5","doi-asserted-by":"publisher","first-page":"2226","DOI":"10.1109\/TMM.2022.3144890","volume":"25","author":"N Jiang","year":"2022","unstructured":"Jiang, N., Sheng, B., Li, P., Lee, T.Y.: Photohelper: portrait photographing guidance via deep feature retrieval and fusion. IEEE Trans. Multimedia 25, 2226\u20132238 (2022). https:\/\/doi.org\/10.1109\/TMM.2022.3144890","journal-title":"IEEE Trans. Multimedia"},{"key":"4126_CR6","doi-asserted-by":"publisher","first-page":"2881","DOI":"10.1007\/s00371-022-02499-x","volume":"397","author":"Z Liu","year":"2023","unstructured":"Liu, Z., Liu, J.: Hypergraph attentional convolutional neural network for salient object detection. Vis. Comput. 397, 2881\u20132907 (2023). https:\/\/doi.org\/10.1007\/s00371-022-02499-x","journal-title":"Vis. Comput."},{"key":"4126_CR7","doi-asserted-by":"crossref","unstructured":"Deng, H., Birdal, T., Ilic, S.: Ppfnet: Global context aware local features for robust 3d point matching. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 195\u2013205 (2018). 
https:\/\/doi.org\/10.48550\/arXiv.1802.02669","DOI":"10.1109\/CVPR.2018.00028"},{"key":"4126_CR8","doi-asserted-by":"publisher","unstructured":"Xia, Y., Xu, Y., Li, S., Wang, R., Du, J., Cremers, D., Stilla, U.: Soe-net: A self-attention and orientation encoding network for point cloud based place recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11348\u201311357 (2021.) https:\/\/doi.org\/10.1109\/CVPR46437.2021.01119","DOI":"10.1109\/CVPR46437.2021.01119"},{"key":"4126_CR9","doi-asserted-by":"publisher","first-page":"1182","DOI":"10.1109\/LRA.2023.3341766","volume":"92","author":"M Ramezani","year":"2024","unstructured":"Ramezani, M., Wang, L., Knights, J., Li, Z., Pounds, P., Moghadam, P.: Pose-graph attentional graph neural network for lidar place recognition. IEEE Robot. Autom. Lett. 92, 1182\u20131189 (2024). https:\/\/doi.org\/10.1109\/LRA.2023.3341766","journal-title":"IEEE Robot. Autom. Lett."},{"key":"4126_CR10","doi-asserted-by":"publisher","first-page":"447","DOI":"10.1007\/s00371-020-01814-8","volume":"373","author":"S Gupta","year":"2021","unstructured":"Gupta, S., Thakur, K., Kumar, M.: 2d-human face recognition using sift and surf descriptors of face\u2019s feature regions. Vis. Comput. 373, 447\u2013456 (2021). https:\/\/doi.org\/10.1007\/s00371-020-01814-8","journal-title":"Vis. Comput."},{"key":"4126_CR11","doi-asserted-by":"publisher","first-page":"2230","DOI":"10.1002\/cav.2230","volume":"352","author":"D Bellenger","year":"2024","unstructured":"Bellenger, D., Chen, M., Xu, Z.: Facial emotion recognition with a reduced feature set for video game and metaverse avatars. Comput. Animation Virt. Worlds 352, 2230 (2024). https:\/\/doi.org\/10.1002\/cav.2230","journal-title":"Comput. Animation Virt. 
Worlds"},{"key":"4126_CR12","doi-asserted-by":"publisher","first-page":"8013","DOI":"10.3390\/s24248013","volume":"2424","author":"S Umirzakova","year":"2024","unstructured":"Umirzakova, S., Muksimova, S., Mardieva, S., Sultanov Baxtiyarovich, M., Cho, Y.I.: Mira-cap: memory-integrated retrieval-augmented captioning for state-of-the-art image and video captioning. Sensors 2424, 8013 (2024). https:\/\/doi.org\/10.3390\/s24248013","journal-title":"Sensors"},{"key":"4126_CR13","doi-asserted-by":"publisher","unstructured":"Shi, J., Xu, L., Jia, J.: Discriminative blur detection features. In: 2014 IEEE Conference on Computer Vision and Pattern Recognition, pp. 2965\u20132972 (2014). https:\/\/doi.org\/10.1109\/CVPR.2014.379","DOI":"10.1109\/CVPR.2014.379"},{"key":"4126_CR14","doi-asserted-by":"publisher","first-page":"1626","DOI":"10.1109\/TIP.2016.2528042","volume":"254","author":"X Yi","year":"2016","unstructured":"Yi, X., Eramian, M.: Lbp-based segmentation of defocus blur. IEEE Trans. Image Process. 254, 1626\u20131638 (2016). https:\/\/doi.org\/10.1109\/TIP.2016.2528042","journal-title":"IEEE Trans. Image Process."},{"key":"4126_CR15","doi-asserted-by":"publisher","first-page":"1652","DOI":"10.1109\/LSP.2016.2611608","volume":"2311","author":"C Tang","year":"2016","unstructured":"Tang, C., Wu, J., Hou, Y., Wang, P., Li, W.: A spectral and spatial approach of coarse-to-fine blurred image region detection. IEEE Signal Process. Lett. 2311, 1652\u20131656 (2016). https:\/\/doi.org\/10.1109\/LSP.2016.2611608","journal-title":"IEEE Signal Process. Lett."},{"key":"4126_CR16","doi-asserted-by":"publisher","unstructured":"Zhao, W., Zheng, B., Lin, Q., Lu, H.: Enhancing diversity of defocus blur detectors via cross-ensemble network. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 8897\u20138905 (2019). 
https:\/\/doi.org\/10.1109\/CVPR.2019.00911","DOI":"10.1109\/CVPR.2019.00911"},{"key":"4126_CR17","doi-asserted-by":"publisher","first-page":"5426","DOI":"10.1109\/TIP.2021.3084101","volume":"30","author":"W Zhao","year":"2021","unstructured":"Zhao, W., Hou, X., He, Y., Lu, H.: Defocus blur detection via boosting diversity of deep ensemble networks. IEEE Trans. Image Process. 30, 5426\u20135438 (2021). https:\/\/doi.org\/10.1109\/TIP.2021.3084101","journal-title":"IEEE Trans. Image Process."},{"key":"4126_CR18","doi-asserted-by":"publisher","unstructured":"Tang, C., Liu, X., Zhu, X., Zhu, E., Sun, K., Wang, P., Wang, L., Zomaya, A.: R$$^{2}$$MRF: defocus blur detection via recurrently refining multi-scale residual features. In: Proceedings of the AAAI Conference on Artificial Intelligence, pp. 12063\u201312070 (2020). https:\/\/doi.org\/10.1609\/aaai.v34i07.6884","DOI":"10.1609\/aaai.v34i07.6884"},{"key":"4126_CR19","doi-asserted-by":"publisher","first-page":"624","DOI":"10.1109\/TMM.2020.2985541","volume":"23","author":"C Tang","year":"2021","unstructured":"Tang, C., Liu, X., An, S., Wang, P.: BR$$^{2}$$Net: defocus blur detection via a bidirectional channel attention residual refining network. IEEE Trans. Multimedia 23, 624\u2013635 (2021). https:\/\/doi.org\/10.1109\/TMM.2020.2985541","journal-title":"IEEE Trans. Multimedia"},{"key":"4126_CR20","doi-asserted-by":"publisher","unstructured":"Su, B., Lu, S., Tan, C.L.: Blurred image region detection and classification. In: Proceedings of the 19th ACM International Conference on Multimedia, pp. 1397\u20131400 (2011). https:\/\/doi.org\/10.1145\/2072298.2072024","DOI":"10.1145\/2072298.2072024"},{"key":"4126_CR21","doi-asserted-by":"publisher","first-page":"2220","DOI":"10.1109\/TCYB.2015.2472478","volume":"4610","author":"Y Pang","year":"2016","unstructured":"Pang, Y., Zhu, H., Li, X., Li, X.: Classifying discriminative features for blur detection. IEEE Trans. Cybernet. 4610, 2220\u20132227 (2016). 
https:\/\/doi.org\/10.1109\/TCYB.2015.2472478","journal-title":"IEEE Trans. Cybernet."},{"key":"4126_CR22","doi-asserted-by":"publisher","unstructured":"Xu, G., Quan, Y., Ji, H.: Estimating defocus blur via rank of local patches. In: 2017 IEEE International Conference on Computer Vision (ICCV), pp. 5381\u20135389 (2017). https:\/\/doi.org\/10.1109\/ICCV.2017.574","DOI":"10.1109\/ICCV.2017.574"},{"key":"4126_CR23","doi-asserted-by":"publisher","first-page":"1652","DOI":"10.1109\/LSP.2016.2611608","volume":"2311","author":"C Tang","year":"2016","unstructured":"Tang, C., Wu, J., Hou, Y., Wang, P., Li, W.: A spectral and spatial approach of coarse-to-fine blurred image region detection. IEEE Signal Process. Lett. 2311, 1652\u20131656 (2016). https:\/\/doi.org\/10.1109\/LSP.2016.2611608","journal-title":"IEEE Signal Process. Lett."},{"key":"4126_CR24","doi-asserted-by":"publisher","unstructured":"Golestaneh, S.A., Karam, L.J.: Spatially-varying blur detection based on multiscale fused and sorted transform coefficients of gradient magnitudes. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 596\u2013605 (2017). https:\/\/doi.org\/10.1109\/CVPR.2017.71","DOI":"10.1109\/CVPR.2017.71"},{"key":"4126_CR25","doi-asserted-by":"publisher","first-page":"239","DOI":"10.1016\/j.neucom.2018.05.106","volume":"312","author":"X Zhu","year":"2018","unstructured":"Zhu, X., Tang, C., Wang, P., Xu, H., Wang, M., Chen, J., Tian, J.: Saliency detection via affinity graph learning and weighted manifold ranking. Neurocomputing 312, 239\u2013250 (2018). https:\/\/doi.org\/10.1016\/j.neucom.2018.05.106","journal-title":"Neurocomputing"},{"key":"4126_CR26","doi-asserted-by":"publisher","first-page":"15","DOI":"10.1109\/TCSVT.2016.2602308","volume":"281","author":"Z Zhang","year":"2018","unstructured":"Zhang, Z., Liu, Y., Xiong, Z., Li, J., Zhang, M.: Focus and blurriness measure using reorganized DCT coefficients for an autofocus application. IEEE Trans. 
Circuits Syst. Video Technol. 281, 15\u201330 (2018). https:\/\/doi.org\/10.1109\/TCSVT.2016.2602308","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"4126_CR27","doi-asserted-by":"publisher","first-page":"3571","DOI":"10.1109\/TCSVT.2019.2944915","volume":"3010","author":"H Kumar","year":"2020","unstructured":"Kumar, H., Gupta, S., Venkatesh, K.S.: Simultaneous estimation of defocus and motion blurs from single image using equivalent gaussian representation. IEEE Trans. Circuits Syst. Video Technol. 3010, 3571\u20133583 (2020). https:\/\/doi.org\/10.1109\/TCSVT.2019.2944915","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"4126_CR28","doi-asserted-by":"publisher","first-page":"538","DOI":"10.1016\/j.vrih.2023.06.008","volume":"56","author":"T Junjie","year":"2023","unstructured":"Junjie, T., Yinghui, W., Haomiao, M., Tao, Y., Lingyu, A., Wei, L.: Image defocus deblurring method based on gradient difference of boundary neighborhood. Virtual Reality Intell. Hardw. 56, 538\u2013549 (2023). https:\/\/doi.org\/10.1016\/j.vrih.2023.06.008","journal-title":"Virtual Reality Intell. Hardw."},{"key":"4126_CR29","doi-asserted-by":"publisher","unstructured":"Park, J., Tai, Y.W., Cho, D., Kweon, I.S.: A unified approach of multi-scale deep and hand-crafted features for defocus estimation. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2760\u20132769 (2017). https:\/\/doi.org\/10.1109\/CVPR.2017.295","DOI":"10.1109\/CVPR.2017.295"},{"key":"4126_CR30","doi-asserted-by":"publisher","unstructured":"Zhao, W., Zhao, F., Wang, D., Lu, H.: Defocus blur detection via multi-stream bottom-top-bottom fully convolutional network. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3080\u20133088 (2018). 
https:\/\/doi.org\/10.1109\/CVPR.2018.00325","DOI":"10.1109\/CVPR.2018.00325"},{"key":"4126_CR31","doi-asserted-by":"publisher","first-page":"2719","DOI":"10.1109\/TCSVT.2021.3095347","volume":"325","author":"F Zhao","year":"2022","unstructured":"Zhao, F., Lu, H., Zhao, W., Yao, L.: Image-scale-symmetric cooperative network for defocus blur detection. IEEE Trans. Circuits Syst. Video Technol. 325, 2719\u20132731 (2022). https:\/\/doi.org\/10.1109\/TCSVT.2021.3095347","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"4126_CR32","doi-asserted-by":"publisher","first-page":"2107","DOI":"10.1109\/TIP.2018.2881830","volume":"285","author":"K Zeng","year":"2019","unstructured":"Zeng, K., Wang, Y., Mao, J., Liu, J., Peng, W., Chen, N.: A local metric for defocus blur detection based on CNN feature learning. IEEE Trans. Image Process. 285, 2107\u20132115 (2019). https:\/\/doi.org\/10.1109\/TIP.2018.2881830","journal-title":"IEEE Trans. Image Process."},{"key":"4126_CR33","doi-asserted-by":"publisher","first-page":"7719","DOI":"10.1109\/TNNLS.2022.3146004","volume":"3410","author":"Y Zhou","year":"2022","unstructured":"Zhou, Y., Chen, Z., Li, P., Song, H., Chen, C.P., Sheng, B.: Fsad-net: feedback spatial attention dehazing network. IEEE Trans. Neural Netw. Learning Syst. 3410, 7719\u20137733 (2022). https:\/\/doi.org\/10.1109\/TNNLS.2022.3146004","journal-title":"IEEE Trans. Neural Netw. Learning Syst."},{"key":"4126_CR34","doi-asserted-by":"publisher","first-page":"2201","DOI":"10.1002\/cav.2201","volume":"351","author":"X Zhu","year":"2024","unstructured":"Zhu, X., Yao, X., Zhang, J., Zhu, M., You, L., Yang, X., Zhang, J., Zhao, H., Zeng, D.: Tmsdnet: transformer with multi-scale dense network for single and multi-view 3D reconstruction. Comput. Animation Virtual Worlds 351, 2201 (2024). https:\/\/doi.org\/10.1002\/cav.2201","journal-title":"Comput. 
Animation Virtual Worlds"},{"key":"4126_CR35","doi-asserted-by":"publisher","unstructured":"Du, J., Wang, R., Cremers, D.: Dh3d: Deep hierarchical 3d descriptors for robust large-scale 6dof relocalization. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part IV 16, pp. 744\u2013762 (2020). https:\/\/doi.org\/10.1007\/978-3-030-58548-8 . Springer","DOI":"10.1007\/978-3-030-58548-8"},{"key":"4126_CR36","doi-asserted-by":"publisher","unstructured":"Fan, Z., Song, Z., Liu, H., Lu, Z., He, J., Du, X.: Svt-net: Super light-weight sparse voxel transformer for large scale place recognition. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, pp. 551\u2013560 (2022). https:\/\/doi.org\/10.1609\/aaai.v36i1.19934","DOI":"10.1609\/aaai.v36i1.19934"},{"key":"4126_CR37","doi-asserted-by":"publisher","unstructured":"Xia, Y., Gladkova, M., Wang, R., Li, Q., Stilla, U., Henriques, J.F., Cremers, D.: Casspr: Cross attention single scan place recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8461\u20138472 (2023). https:\/\/doi.org\/10.48550\/arXiv.2211.12542","DOI":"10.48550\/arXiv.2211.12542"},{"key":"4126_CR38","doi-asserted-by":"publisher","first-page":"14426","DOI":"10.1007\/s10489-022-03303-y","volume":"5212","author":"Z Zhao","year":"2022","unstructured":"Zhao, Z., Yang, H., Luo, H.: Defocus blur detection via transformer encoder and edge guidance. Appl. Intell. 5212, 14426\u201314439 (2022). https:\/\/doi.org\/10.1007\/s10489-022-03303-y","journal-title":"Appl. Intell."},{"key":"4126_CR39","doi-asserted-by":"publisher","first-page":"53095","DOI":"10.1007\/s11042-023-17560-7","volume":"8317","author":"S Chai","year":"2024","unstructured":"Chai, S., Zhao, X., Zhang, J., Kan, J.: Defocus blur detection based on transformer and complementary residual learning. Multimed. Tools Appl. 8317, 53095\u201353118 (2024). 
https:\/\/doi.org\/10.1007\/s11042-023-17560-7","journal-title":"Multimed. Tools Appl."},{"key":"4126_CR40","doi-asserted-by":"publisher","unstructured":"Chollet, F.: Xception: Deep learning with depthwise separable convolutions. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1800\u20131807 (2017). https:\/\/doi.org\/10.1109\/CVPR.2017.195","DOI":"10.1109\/CVPR.2017.195"},{"key":"4126_CR41","doi-asserted-by":"publisher","unstructured":"Woo, S., Park, J., Lee, J.Y., Kweon, I.S.: Cbam: Convolutional block attention module. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 3\u201319 (2018). https:\/\/doi.org\/10.1007\/978-3-030-01234-21","DOI":"10.1007\/978-3-030-01234-21"},{"key":"4126_CR42","doi-asserted-by":"publisher","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"4126_CR43","doi-asserted-by":"publisher","unstructured":"Lee, S., Park, S.J., Hong, K.S.: Rdfnet: Rgb-d multi-level residual feature fusion for indoor semantic segmentation. In: 2017 IEEE International Conference on Computer Vision (ICCV), pp. 4990\u20134999 (2017). https:\/\/doi.org\/10.1109\/ICCV.2017.533","DOI":"10.1109\/ICCV.2017.533"},{"key":"4126_CR44","doi-asserted-by":"publisher","unstructured":"Xie, S., Tu, Z.: Holistically-nested edge detection. In: 2015 IEEE International Conference on Computer Vision (ICCV), pp. 1395\u20131403 (2015). https:\/\/doi.org\/10.1109\/ICCV.2015.164","DOI":"10.1109\/ICCV.2015.164"},{"key":"4126_CR45","doi-asserted-by":"publisher","unstructured":"Jiang, Z., Xu, X., Zhang, C., Zhu, C.: Multianet: a multi-attention network for defocus blur detection. In: 2020 IEEE 22nd International Workshop on Multimedia Signal Processing (MMSP), pp. 1\u20136 (2020). 
https:\/\/doi.org\/10.1109\/MMSP48831.2020.9287072 . IEEE","DOI":"10.1109\/MMSP48831.2020.9287072"},{"key":"4126_CR46","doi-asserted-by":"publisher","unstructured":"Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: 3rd International Conference on Learning Representations, ICLR 2015,San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings (2015). https:\/\/doi.org\/10.48550\/arXiv.1412.6980","DOI":"10.48550\/arXiv.1412.6980"},{"key":"4126_CR47","doi-asserted-by":"publisher","first-page":"2622","DOI":"10.1007\/s11263-021-01490-8","volume":"129","author":"DP Fan","year":"2017","unstructured":"Fan, D.P., Cheng, M.M., Liu, Y., Li, T., Borji, A.: Structure-measure: a new way to evaluate foreground maps. Int. J. Comput. Vision 129, 2622\u20132638 (2017). https:\/\/doi.org\/10.1007\/s11263-021-01490-8","journal-title":"Int. J. Comput. Vision"},{"key":"4126_CR48","doi-asserted-by":"publisher","unstructured":"Margolin, R., Zelnik-Manor, L., Tal, A.: How to evaluate foreground maps. In: 2014 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255 (2014). https:\/\/doi.org\/10.1109\/CVPR.2014.39","DOI":"10.1109\/CVPR.2014.39"},{"key":"4126_CR49","doi-asserted-by":"publisher","first-page":"79","DOI":"10.3354\/cr030079","volume":"30","author":"CJ Willmott","year":"2005","unstructured":"Willmott, C.J., Matsuura, K.: Advantages of the mean absolute error (MAE) over the root mean square error (RMSE) in assessing average model performance. Climate Res. 30, 79\u201382 (2005). https:\/\/doi.org\/10.3354\/cr030079","journal-title":"Climate Res."},{"key":"4126_CR50","doi-asserted-by":"publisher","first-page":"9228","DOI":"10.1109\/TMM.2023.3248162","volume":"25","author":"W Zhao","year":"2023","unstructured":"Zhao, W., Wei, F., Wang, H., He, Y., Lu, H.: Full-scene defocus blur detection with defbd+ via multi-level distillation learning. IEEE Trans. Multimedia 25, 9228\u20139240 (2023). 
https:\/\/doi.org\/10.1109\/TMM.2023.3248162","journal-title":"IEEE Trans. Multimedia"},{"key":"4126_CR51","doi-asserted-by":"publisher","first-page":"1158","DOI":"10.1109\/TIP.2023.3240856","volume":"32","author":"J Li","year":"2023","unstructured":"Li, J., Liang, B., Lu, X., Li, M., Lu, G., Xu, Y.: From global to local: Multi-patch and multi-scale contrastive similarity learning for unsupervised defocus blur detection. IEEE Trans. Image Process. 32, 1158\u20131169 (2023). https:\/\/doi.org\/10.1109\/TIP.2023.3240856","journal-title":"IEEE Trans. Image Process."},{"key":"4126_CR52","doi-asserted-by":"publisher","unstructured":"Zhang, W., Yan, Q., Xiao, C.: Detail preserved point cloud completion via separated feature aggregation. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XXV 16, pp. 512\u2013528 (2020). https:\/\/doi.org\/10.1007\/978-3-030-58595-231 . Springer","DOI":"10.1007\/978-3-030-58595-231"},{"key":"4126_CR53","doi-asserted-by":"publisher","unstructured":"Xia, Y., Xia, Y., Li, W., Song, R., Cao, K., Stilla, U.: Asfm-net: Asymmetrical siamese feature matching network for point completion. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 1938\u20131947 (2021). https:\/\/doi.org\/10.1145\/3474085.3475348","DOI":"10.1145\/3474085.3475348"},{"key":"4126_CR54","doi-asserted-by":"publisher","unstructured":"Abuolaim, A., Brown, M.S.: Defocus deblurring using dual-pixel data. In: Computer Vision \u2013 ECCV 2020, pp. 111\u2013126 (2020). https:\/\/doi.org\/10.48550\/arXiv.2005.00305","DOI":"10.48550\/arXiv.2005.00305"},{"key":"4126_CR55","doi-asserted-by":"publisher","unstructured":"Ruan, L., Bemana, M., Seidel, H.P., Myszkowski, K., Chen, B.: Revisiting image deblurring with an efficient convnet. 
ArXiv abs\/2302.02234 (2023) https:\/\/doi.org\/10.48550\/arXiv.2302.02234","DOI":"10.48550\/arXiv.2302.02234"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-04126-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-025-04126-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-04126-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,24]],"date-time":"2025-09-24T14:00:01Z","timestamp":1758722401000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-025-04126-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,7]]},"references-count":55,"journal-issue":{"issue":"13","published-print":{"date-parts":[[2025,10]]}},"alternative-id":["4126"],"URL":"https:\/\/doi.org\/10.1007\/s00371-025-04126-x","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"type":"print","value":"0178-2789"},{"type":"electronic","value":"1432-2315"}],"subject":[],"published":{"date-parts":[[2025,8,7]]},"assertion":[{"value":"18 July 2025","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 August 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"The authors declare no competing interests.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}]}}