{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T21:21:18Z","timestamp":1773350478154,"version":"3.50.1"},"reference-count":74,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U23B2009"],"award-info":[{"award-number":["U23B2009"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["92270116"],"award-info":[{"award-number":["92270116"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Comput. Imaging"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/tci.2024.3443732","type":"journal-article","created":{"date-parts":[[2024,8,15]],"date-time":"2024-08-15T17:46:23Z","timestamp":1723743983000},"page":"1207-1220","source":"Crossref","is-referenced-by-count":12,"title":["Decoupling Image Deblurring Into Twofold: A Hierarchical Model for Defocus Deblurring"],"prefix":"10.1109","volume":"10","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0173-1385","authenticated-orcid":false,"given":"Pengwei","family":"Liang","sequence":"first","affiliation":[{"name":"School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5694-505X","authenticated-orcid":false,"given":"Junjun","family":"Jiang","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8857-1785","authenticated-orcid":false,"given":"Xianming","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3264-3265","authenticated-orcid":false,"given":"Jiayi","family":"Ma","sequence":"additional","affiliation":[{"name":"Electronic Information School, Wuhan University, Wuhan, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01633-5"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCI.2019.2948780"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.91"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TCI.2021.3136759"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01264-9_5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58607-2_7"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00016"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2022.105563"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01582"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00207"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00264"},{"key":"ref12","first-page":"20812","article-title":"Gaussian kernel mixture network for single image defocus deblurring","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Quan","year":"2021"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00557"},{"key":"ref14","article-title":"Deep ViT features as dense visual descriptors","volume-title":"Proc. Eur. Conf. Comput. Vis. Workshops","author":"Amir","year":"2022"},{"key":"ref15","first-page":"23296","article-title":"Intriguing properties of vision transformers","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Naseer","year":"2021"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref17","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref19","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Dosovitskiy","year":"2021"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2024.3359810"},{"key":"ref21","article-title":"How to understand masked autoencoders","author":"Cao","year":"2022"},{"key":"ref22","article-title":"Unlocking masked autoencoders as loss function for image and video restoration","author":"Zhou","year":"2023"},{"issue":"1","key":"ref23","first-page":"1","article-title":"A path towards autonomous machine intelligence version 0.9.2, 2022-06-27","volume":"62","author":"LeCun","year":"2022","journal-title":"Open Rev."},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TCI.2021.3092891"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3127850"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i3.25446"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3151099"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25235"},{"key":"ref29","article-title":"Revisiting image deblurring with an efficient convnet","author":"Ruan","year":"2023"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01158"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00564"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2023.3341700"},{"key":"ref33","first-page":"8898","article-title":"PromptRestorer: A prompting image restoration method with degradation perception","volume-title":"Proc. 37th Conf. Neural Inf. Process. Syst.","author":"Wang","year":"2023"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3330416"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01195"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2022.11.014"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TCI.2018.2889959"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TCI.2021.3063872"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TCI.2020.3039564"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TCI.2022.3228633"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TCI.2020.3046189"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2022.105686"},{"key":"ref43","article-title":"iBOT: Image BERT pre-training with online tokenizer","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhou","year":"2022"},{"key":"ref44","article-title":"CoCa: Contrastive captioners are image-text foundation models","author":"Yu","year":"2022","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3348486"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01426"},{"key":"ref47","article-title":"HiViT: A simpler and more efficient design of hierarchical vision transformer","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang","year":"2023"},{"key":"ref48","article-title":"BEIT: BERT pre-training of image transformers","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bao","year":"2021"},{"key":"ref49","article-title":"DINOv2: Learning robust visual features without supervision","author":"Oquab","year":"2023","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00574"},{"key":"ref51","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Alayrac","year":"2022"},{"key":"ref52","first-page":"12888","article-title":"Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li","year":"2022"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"ref54","article-title":"Florence: A new foundation model for computer vision","author":"Yuan","year":"2021"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01229"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01838"},{"key":"ref57","article-title":"Unified-io: A unified model for vision, language, and multi-modal tasks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lu","year":"2022"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3236459"},{"key":"ref59","article-title":"VICReg: Variance-invariance-covariance regularization for self-supervised learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bardes","year":"2022"},{"key":"ref60","article-title":"Large batch training of convolutional networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"You","year":"2018"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298665"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2771563"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01250"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2003.819861"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00068"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00510"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2011.2109730"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01008"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.164"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.35"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20071-7_29"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00281"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3319330"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.379"}],"container-title":["IEEE Transactions on Computational Imaging"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6745852\/10398876\/10637737.pdf?arnumber=10637737","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,14]],"date-time":"2025-05-14T17:34:05Z","timestamp":1747244045000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10637737\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":74,"URL":"https:\/\/doi.org\/10.1109\/tci.2024.3443732","relation":{},"ISSN":["2333-9403","2334-0118","2573-0436"],"issn-type":[{"value":"2333-9403","type":"electronic"},{"value":"2334-0118","type":"electronic"},{"value":"2573-0436","type":"print"}],"subject":[],"published":{"date-parts":[[2024]]}}}