{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T02:03:39Z","timestamp":1773194619353,"version":"3.50.1"},"reference-count":101,"publisher":"IEEE","license":[{"start":{"date-parts":[[2026,2,16]],"date-time":"2026-02-16T00:00:00Z","timestamp":1771200000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,2,16]],"date-time":"2026-02-16T00:00:00Z","timestamp":1771200000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026,2,16]]},"DOI":"10.1109\/icnc68183.2026.11416952","type":"proceedings-article","created":{"date-parts":[[2026,3,9]],"date-time":"2026-03-09T19:55:54Z","timestamp":1773086154000},"page":"1-7","source":"Crossref","is-referenced-by-count":0,"title":["Deep Restoration of Archival Videos: Developments, Challenges, and Opportunities"],"prefix":"10.1109","author":[{"given":"Shiv","family":"Gehlot","sequence":"first","affiliation":[{"name":"Dolby Laboratories Inc"}]},{"given":"Sri Harsha","family":"Musunuri","sequence":"additional","affiliation":[{"name":"Dolby Laboratories Inc"}]},{"given":"Sutanu","family":"Bera","sequence":"additional","affiliation":[{"name":"Dolby Laboratories Inc"}]},{"given":"Aupendu","family":"Kar","sequence":"additional","affiliation":[{"name":"Dolby Laboratories Inc"}]},{"given":"Guan-Ming","family":"Su","sequence":"additional","affiliation":[{"name":"Dolby Laboratories Inc"}]}],"member":"263","reference":[{"key":"ref1","first-page":"473","article-title":"Detection and removal of film dirt and scratches in archived film sequences","volume-title":"Proc. IEEE Int. Conf. Acoustics, Speech, and Signal Processing","author":"Kokaram"},{"issue":"4","key":"ref2","first-page":"574","article-title":"Noise modeling and estimation for video processing","volume":"14","author":"Li","year":"2004","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4471-3485-5"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/18.382009"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP49359.2023.10222243"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2005.38"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/MIPR67560.2025.00035"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2023.3308726"},{"key":"ref9","first-page":"5436","article-title":"Flow-based video denoising with consistent texture preservation","volume-title":"Proc. IEEE Int. Conf. Computer Vision","author":"Liang"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP55913.2025.11084309"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2005.38"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2007.901238"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2199324"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2662206"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00143"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19784-0_16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46487-9_40"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20071-7_1"},{"key":"ref19","first-page":"8144","article-title":"Lga-net: Learning local and global affinities for sparse scribble based image colorization","volume-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","author":"Lyu"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00183"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/3550454.3555471"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3197517.3201365"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.180"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr42600.2020.00311"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00763"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00257"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00771"},{"key":"ref28","article-title":"Stripformer: Strip transformer for fast image deblurring","volume-title":"European Conference on Computer Vision (ECCV)","author":"Zhang"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00338"},{"key":"ref30","article-title":"Event-based video reconstruction via exploiting complementary information for motion deblurring","author":"Chen","year":"2022","journal-title":"IEEE Transactions on Image Processing"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-032-04546-1_5"},{"key":"ref32","article-title":"Video deblurring with conditional diffusion models","author":"Ho","year":"2022","journal-title":"Neural Information Processing Systems (NeurIPS)"},{"key":"ref33","article-title":"Towards unified image deblurring using a mixture-of-experts decoder","author":"Feijoo","year":"2025","journal-title":"arXiv preprint arXiv:2508.06228"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.431"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.2984098"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2810539"},{"key":"ref37","article-title":"Deep retinex decomposition for low-light enhancement","author":"Wei","year":"2018","journal-title":"arXiv preprint arXiv:1808.04560"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01149"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2018.01.010"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i3.25364"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00185"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3486610"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.740"},{"key":"ref44","article-title":"Universal style transfer via feature transforms","volume":"30","author":"Li","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00303"},{"key":"ref46","first-page":"6207","article-title":"Temporal consistent semantic video color transfer from multiple","volume-title":"Proceedings of the Computer Vision and Pattern Recognition Conference","author":"Kar"},{"key":"ref47","article-title":"Nlut: Neural-based 3d lookup tables for video photorealistic style transfer","author":"Chen","year":"2023","journal-title":"arXiv preprint arXiv:2303.09170"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TCOM.1981.1094952"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TCE.2007.381754"},{"key":"ref50","article-title":"Digital Video and HDTV: Algorithms and Interfaces","author":"Poynton","year":"2003"},{"key":"ref51","article-title":"ITU-R BT.601: Studio encoding parameters of digital television for standard 4:3 and wide-screen 16:9 aspect ratios","year":"2011","journal-title":"International Telecommunication Union"},{"key":"ref52","first-page":"690","article-title":"Edge-directed interpolation for deinterlacing","volume-title":"Proc. Int. Conf. Image Process.","volume":"3","author":"Li"},{"issue":"3","key":"ref53","first-page":"1423","article-title":"Motion adaptive deinterlacing based on edge pattern analysis","volume":"55","author":"Traverso","year":"2009","journal-title":"IEEE Trans. Consum. Electron."},{"issue":"8","key":"ref54","first-page":"1037","article-title":"Motion-compensated deinterlacing using adaptive edge-oriented interpolation","volume":"15","author":"Zhang","year":"2005","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"ref55","article-title":"Real-time deep video deinterlacing","author":"Zhu","year":"2017","journal-title":"arXiv preprint arXiv:1708.00187"},{"key":"ref56","article-title":"Deep deinterlacing","volume-title":"SMPTE Annual Tech. Conf. Exhib.","author":"Bernasconi"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3112548"},{"key":"ref58","article-title":"MFDIN: Multi-frame joint enhancement for video deinterlacing","volume-title":"Proc. IEEE Int. Conf. Comput. Vis.","author":"Chen"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2003.815165"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2012.2221191"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-51811-4_3"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW53098.2021.00030"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746702"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3128275"},{"key":"ref65","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume":"33","author":"Ho","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-96-2071-5_12"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00245"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.3390\/s24061907"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1145\/3746266.3762159"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.37"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00183"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00382"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00945"},{"key":"ref74","article-title":"Xvfi: Cross-space video frame interpolation transformer","author":"Sim","year":"2023","journal-title":"CVPR"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680846"},{"key":"ref76","article-title":"Videodiff: Diffusion-based video frame interpolation with temporal consistency","volume-title":"European Conference on Computer Vision (ECCV)","author":"Lee"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1016\/1049-9652(91)90045-L"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2003.1203207"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00588"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.52202\/068431-0028"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02400"},{"key":"ref82","article-title":"VideoGigaGAN: Towards detail-rich video superresolution","author":"Xu","year":"2024","journal-title":"arXiv preprint arXiv:2404.12388"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00217"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01693"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72764-1_1"},{"key":"ref86","article-title":"PromptIR: Prompting for all-in-one blind image restoration","volume":"36","author":"Potlapalli","year":"2023","journal-title":"Advances in Neural Information Processing Systems (NeurIPS)"},{"key":"ref87","article-title":"SPIRE: Semantic prompt-driven image restoration","author":"Qi","year":"2023","journal-title":"arXiv preprint arXiv:2312.11595"},{"key":"ref88","article-title":"Controlling vision-language models for universal image restoration","volume-title":"Proc. Int. Conf. Learn. Represent. (ICLR)","author":"Luo"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00308"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2873610"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1137\/16M1102884"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00958"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/ICIPW68931.2025.11385987"},{"key":"ref94","article-title":"Diffir2vr-zero: Zero-shot video restoration with diffusion-based image restoration models","author":"Yeh","year":"2024","journal-title":"arXiv preprint arXiv:2407.01519"},{"key":"ref95","article-title":"Ditvr: Diffusion transformer for zero-shot video restoration","author":"Gao","year":"2025","journal-title":"arXiv preprint arXiv:2508.07811"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.19"},{"key":"ref97","article-title":"Tecogan: Temporally coherent gans for video superresolution","author":"Chu","year":"2018","journal-title":"arXiv preprint arXiv:1811.09393"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.00205"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00976"},{"key":"ref100","article-title":"Star: Spatial-temporal augmentation with text-tovideo models for real-world video super-resolution","author":"Xie","year":"2025","journal-title":"arXiv preprint arXiv:2501.02976"},{"key":"ref101","article-title":"Dove: Efficient one-step diffusion model for real-world video super-resolution","author":"Chen","year":"2025","journal-title":"arXiv preprint arXiv:2505.16239"}],"event":{"name":"2026 International Conference on Computing, Networking and Communications (ICNC)","location":"Maui, HI, USA","start":{"date-parts":[[2026,2,16]]},"end":{"date-parts":[[2026,2,19]]}},"container-title":["2026 International Conference on Computing, Networking and Communications (ICNC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11416824\/11416825\/11416952.pdf?arnumber=11416952","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T05:27:47Z","timestamp":1773120467000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11416952\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,16]]},"references-count":101,"URL":"https:\/\/doi.org\/10.1109\/icnc68183.2026.11416952","relation":{},"subject":[],"published":{"date-parts":[[2026,2,16]]}}}