{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,9]],"date-time":"2025-12-09T06:34:45Z","timestamp":1765262085081,"version":"3.46.0"},"reference-count":96,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Gda\u0144sk University of Technology through the Ventus Hydrogenii Redivivus\u2014\u201cExcellence Initiative\u2014Research University\u201d","award":["DEC-16\/1\/2022\/IDUB\/V.5d\/VHR"],"award-info":[{"award-number":["DEC-16\/1\/2022\/IDUB\/V.5d\/VHR"]}]},{"name":"Polish High-Performance Computing Infrastructure PLGrid [High Performance Computing (HPC) Center: Akademickie Centrum Komputerowe (ACK) Cyfronet Akademia G\u00f3rniczo-Hutnicza (AGH)]","award":["PLG\/2024\/017008"],"award-info":[{"award-number":["PLG\/2024\/017008"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1109\/tcsvt.2025.3575717","type":"journal-article","created":{"date-parts":[[2025,6,2]],"date-time":"2025-06-02T14:03:57Z","timestamp":1748873037000},"page":"12196-12211","source":"Crossref","is-referenced-by-count":0,"title":["Lifting Deep Image Denoisers to Video With Frame Interpolation Pre-Training"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1663-5839","authenticated-orcid":false,"given":"Piotr Kopa","family":"Ostrowski","sequence":"first","affiliation":[{"name":"Department of Decision Systems and Robotics, Faculty of Electronics, Telecommunications, and Informatics (ETI), Gda&#x0144;sk University of Technology, Gda&#x0144;sk, Poland"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7093-8764","authenticated-orcid":false,"given":"Daniel","family":"W\u0119sierski","sequence":"additional","affiliation":[{"name":"Department of Multimedia Systems, Faculty of ETI, Gda&#x0144;sk University of Technology, Gda&#x0144;sk, Poland"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8235-7641","authenticated-orcid":false,"given":"Anna","family":"Jezierska","sequence":"additional","affiliation":[{"name":"Department of Modelling and Optimization of Dynamical Systems, Systems Research Institute, Warsaw, Poland"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3952-5731","authenticated-orcid":false,"given":"Tomasz","family":"Stefa\u0144ski","sequence":"additional","affiliation":[{"name":"Department of Decision Systems and Robotics, Faculty of Electronics, Telecommunications, and Informatics (ETI), Gda&#x0144;sk University of Technology, Gda&#x0144;sk, Poland"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1137\/23M1545859"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00684"},{"key":"ref3","first-page":"1","article-title":"Addressing negative transfer in diffusion models","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","volume":"36","author":"Go"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3383862"},{"key":"ref5","article-title":"Imagen video: High definition video generation with diffusion models","author":"Ho","year":"2022","journal-title":"arXiv:2210.02303"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3390404"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s10278-023-00935-5"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19800-7_24"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00588"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00328"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-024-03565-2"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2019.8803136"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-018-01144-2"},{"key":"ref14","article-title":"On the generalization of BasicVSR++ to video deblurring and denoising","author":"Chan","year":"2022","journal-title":"arXiv:2204.05308"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3198317"},{"key":"ref16","first-page":"378","article-title":"Recurrent video restoration transformer with guided deformable attention","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Liang"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3611922"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3372454"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19797-0_34"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00803"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-16449-1_18"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2019.00247"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00237"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.2999209"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/s10851-020-00995-0"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00216"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00043"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2019.00235"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3077140"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475477"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.109360"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20182"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00143"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547934"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00347"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/tcsvt.2025.3553160"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00718"},{"key":"ref38","article-title":"No time to waste: Squeeze time into channel for mobile video understanding","author":"Zhai","year":"2024","journal-title":"arXiv:2405.08344"},{"key":"ref39","first-page":"1","article-title":"Video dynamics prior: An internal learning approach for robust video enhancements","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","author":"Shrivastava"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58539-6_1"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3061062"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00044"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00232"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3176210"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3299232"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-023-17468-2"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-022-10302-5"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-022-10305-2"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01576"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01348"},{"key":"ref51","first-page":"3537","article-title":"Restore from restored: Video restoration with pseudo clean video","volume-title":"Proc. IEEE Conf. Comput. Vis. Pattern Recog.","author":"Lee"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2024.104103"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01361"},{"key":"ref54","first-page":"515","article-title":"KD-MRI: A knowledge distillation framework for image reconstruction and image restoration in MRI workflow","volume-title":"Proc. MIDL","author":"Murugesan"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3290038"},{"key":"ref56","first-page":"1270","article-title":"Training your image restoration network better with random weight network as optimization function","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","volume":"36","author":"Zheng"},{"key":"ref57","article-title":"Learning task-oriented flows to mutually guide feature alignment in synthesized and real video denoising","author":"Cao","year":"2022","journal-title":"arXiv:2208.11803"},{"key":"ref58","first-page":"1","article-title":"AnimeSR: Learning real-world super-resolution models for animation videos","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","author":"Wu"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00340"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00215"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72992-8_20"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_11"},{"key":"ref63","article-title":"Toward accurate and temporally consistent video restoration from raw data","author":"Guo","year":"2023","journal-title":"arXiv:2312.16247"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00493"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46466-4_26"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-12939-2_39"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00211"},{"key":"ref68","first-page":"1","article-title":"Noise2Self: Blind denoising by self-supervision","volume-title":"Proc. ICML","author":"Batson"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00223"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1038\/s41592-023-02005-8"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3402095"},{"key":"ref72","first-page":"1","article-title":"High-quality self-supervised deep image denoising","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","author":"Laine"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00178"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1038\/s41592-021-01285-2"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00169"},{"key":"ref76","article-title":"Self-supervised video representation learning with motion-aware masked autoencoders","author":"Yang","year":"2022","journal-title":"arXiv:2210.04154"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01432"},{"key":"ref78","first-page":"35946","article-title":"Masked autoencoders as spatiotemporal learners","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","author":"Feichtenhofer"},{"issue":"3","key":"ref79","first-page":"2","article-title":"Recovering intrinsic scene characteristics","volume":"2","author":"Barrow","year":"1978","journal-title":"Comput. Vis. Syst."},{"key":"ref80","first-page":"1","article-title":"StyleGAN knows normal, depth, albedo, and more","volume-title":"Proc. Adv. Neural Inform. Process. Syst.","author":"Bhattad"},{"key":"ref81","first-page":"1","article-title":"Intrinsic LoRA: A generalist approach for discovering knowledge in generative models","volume-title":"Proc. Synth. Data Comput. Vis. Workshop","author":"Du"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01268"},{"key":"ref84","first-page":"1","article-title":"Deep inside convolutional networks: Visualising image classification models and saliency maps","volume-title":"Proc. Int. Conf. Learn. Represent","author":"Simonyan"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58536-5_24"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20071-7_2"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25317"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00564"},{"key":"ref89","article-title":"CascadedGaze: Efficiency in global context extraction for image restoration","author":"Ghasemabadi","year":"2024","journal-title":"arXiv:2401.15235"},{"key":"ref90","article-title":"The 2017 Davis challenge on video object segmentation","author":"Pont-Tuset","year":"2017","journal-title":"arXiv:1704.00675"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2014.7025570"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1137\/15M1014395"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2008.2001399"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854626"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-33783-3_44"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00947"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/76\/11278843\/11020664.pdf?arnumber=11020664","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,9]],"date-time":"2025-12-09T06:17:13Z","timestamp":1765261033000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11020664\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12]]},"references-count":96,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2025.3575717","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"type":"print","value":"1051-8215"},{"type":"electronic","value":"1558-2205"}],"subject":[],"published":{"date-parts":[[2025,12]]}}}