{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,5]],"date-time":"2025-07-05T05:03:44Z","timestamp":1751691824094,"version":"3.40.3"},"publisher-location":"Cham","reference-count":53,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031726422"},{"type":"electronic","value":"9783031726439"}],"license":[{"start":{"date-parts":[[2024,11,22]],"date-time":"2024-11-22T00:00:00Z","timestamp":1732233600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,22]],"date-time":"2024-11-22T00:00:00Z","timestamp":1732233600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72643-9_3","type":"book-chapter","created":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T20:48:04Z","timestamp":1732222084000},"page":"37-53","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["BlazeBVD: Make Scale-Time Equalization Great Again for\u00a0Blind Video 
Deflickering"],"prefix":"10.1007","author":[{"given":"Xinmin","family":"Qiu","sequence":"first","affiliation":[]},{"given":"Congying","family":"Han","sequence":"additional","affiliation":[]},{"given":"Zicheng","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Bonan","family":"Li","sequence":"additional","affiliation":[]},{"given":"Tiande","family":"Guo","sequence":"additional","affiliation":[]},{"given":"Pingyu","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xuecheng","family":"Nie","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,22]]},"reference":[{"key":"3_CR1","doi-asserted-by":"crossref","unstructured":"Afifi, M., Brubaker, M.A., Brown, M.S.: HistoGAN: controlling colors of GAN-generated and real images via color histograms. In: CVPR, pp. 7941\u20137950 (2021)","DOI":"10.1109\/CVPR46437.2021.00785"},{"key":"3_CR2","unstructured":"Anarchy, D.: Flicker free. https:\/\/digitalanarchy.com\/Flicker\/main.html"},{"key":"3_CR3","doi-asserted-by":"publisher","first-page":"108","DOI":"10.1016\/j.cviu.2006.11.012","volume":"107","author":"N Bassiou","year":"2007","unstructured":"Bassiou, N., Kotropoulos, C.: Color image histogram equalization by absolute discounting back-off. Comput. Vis. Image Underst. 107, 108\u2013122 (2007)","journal-title":"Comput. Vis. Image Underst."},{"key":"3_CR4","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2816795.2818107","volume":"34","author":"N Bonneel","year":"2015","unstructured":"Bonneel, N., Tompkin, J., Sunkavalli, K., Sun, D., Paris, S., Pfister, H.: Blind video temporal consistency. ACM Trans. Graph. 34, 1\u20139 (2015)","journal-title":"ACM Trans. Graph."},{"key":"3_CR5","doi-asserted-by":"publisher","first-page":"1487","DOI":"10.1109\/TUFFC.2020.3035965","volume":"68","author":"N Bottenus","year":"2021","unstructured":"Bottenus, N., Byram, B.C., Hyun, D.: Histogram matching for visual ultrasound image comparison. 
IEEE Trans. Ultrason. Ferroelectr. Freq. Control 68, 1487\u20131495 (2021)","journal-title":"IEEE Trans. Ultrason. Ferroelectr. Freq. Control"},{"key":"3_CR6","doi-asserted-by":"crossref","unstructured":"Chang, Y.L., Liu, Z.Y., Lee, K.Y., Hsu, W.: Free-form video inpainting with 3D gated convolution and temporal patchGAN. In: ICCV, pp. 9066\u20139075 (2019)","DOI":"10.1109\/ICCV.2019.00916"},{"key":"3_CR7","doi-asserted-by":"crossref","unstructured":"Chu, M., Xie, Y., Mayer, J., Leal-Taix\u00e9, L., Thuerey, N.: Learning temporal coherence via self-supervision for GAN-based video generation. ACM Trans. Graph. 39, 75-1 (2020)","DOI":"10.1145\/3386569.3392457"},{"key":"3_CR8","doi-asserted-by":"publisher","first-page":"241","DOI":"10.1109\/TIP.2005.860328","volume":"15","author":"J Delon","year":"2006","unstructured":"Delon, J.: Movie and video scale-time equalization application to flicker reduction. IEEE Trans. Image Process. 15, 241\u2013248 (2006)","journal-title":"IEEE Trans. Image Process."},{"key":"3_CR9","doi-asserted-by":"publisher","first-page":"703","DOI":"10.1137\/090766371","volume":"3","author":"J Delon","year":"2010","unstructured":"Delon, J., Desolneux, A.: Stabilization of flicker-like effects in image sequences through local contrast correction. SIAM J. Imag. Sci. 3, 703\u2013734 (2010)","journal-title":"SIAM J. Imag. Sci."},{"key":"3_CR10","doi-asserted-by":"crossref","unstructured":"Dosovitskiy, A., et al.: Flownet: learning optical flow with convolutional networks. In: ICCV, pp. 2758\u20132766 (2015)","DOI":"10.1109\/ICCV.2015.316"},{"key":"3_CR11","doi-asserted-by":"crossref","unstructured":"Eslami, N., Arefi, F., Mansourian, A.M., Kasaei, S.: Rethinking raft for efficient optical flow. arXiv preprint arXiv:2401.00833 (2024)","DOI":"10.1109\/MVIP62238.2024.10491183"},{"key":"3_CR12","doi-asserted-by":"crossref","unstructured":"Huang, J., et al.: Exposure normalization and compensation for multiple-exposure correction. In: CVPR, pp. 
6043\u20136052 (2022)","DOI":"10.1109\/CVPR52688.2022.00595"},{"key":"3_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"163","DOI":"10.1007\/978-3-031-19800-7_10","volume-title":"Computer Vision - ECCV 2022","author":"J Huang","year":"2022","unstructured":"Huang, J., et al.: Deep Fourier-based exposure correction network with spatial-frequency interaction. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13679, pp. 163\u2013180. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19800-7_10"},{"key":"3_CR14","doi-asserted-by":"crossref","unstructured":"Huang, J., et al.: Learning sample relationship for exposure correction. In: CVPR, pp. 9904\u20139913 (2023)","DOI":"10.1109\/CVPR52729.2023.00955"},{"key":"3_CR15","doi-asserted-by":"crossref","unstructured":"Huang, J., Zhou, M., Liu, Y., Yao, M., Zhao, F., Xiong, Z.: Exposure-consistency representation learning for exposure correction. In: ACM MM, pp. 6309\u20136317 (2022)","DOI":"10.1145\/3503161.3547829"},{"key":"3_CR16","doi-asserted-by":"crossref","unstructured":"Ilg, E., Mayer, N., Saikia, T., Keuper, M., Dosovitskiy, A., Brox, T.: Flownet 2.0: evolution of optical flow estimation with deep networks. In: CVPR, pp. 2462\u20132470 (2017)","DOI":"10.1109\/CVPR.2017.179"},{"key":"3_CR17","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"694","DOI":"10.1007\/978-3-319-46475-6_43","volume-title":"Computer Vision \u2013 ECCV 2016","author":"J Johnson","year":"2016","unstructured":"Johnson, J., Alahi, A., Fei-Fei, L.: Perceptual losses for real-time style transfer and super-resolution. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9906, pp. 694\u2013711. Springer, Cham (2016). 
https:\/\/doi.org\/10.1007\/978-3-319-46475-6_43"},{"key":"3_CR18","doi-asserted-by":"crossref","unstructured":"Kanj, A., Talbot, H., Luparello, R.R.: Flicker removal and superpixel-based motion tracking for high speed videos. In: ICIP, pp. 245\u2013249 (2017)","DOI":"10.1109\/ICIP.2017.8296280"},{"key":"3_CR19","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3478513.3480546","volume":"40","author":"Y Kasten","year":"2021","unstructured":"Kasten, Y., Ofri, D., Wang, O., Dekel, T.: Layered neural atlases for consistent video editing. ACM Trans. Graph. 40, 1\u201312 (2021)","journal-title":"ACM Trans. Graph."},{"key":"3_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"179","DOI":"10.1007\/978-3-030-01267-0_11","volume-title":"Computer Vision \u2013 ECCV 2018","author":"W-S Lai","year":"2018","unstructured":"Lai, W.-S., Huang, J.-B., Wang, O., Shechtman, E., Yumer, E., Yang, M.-H.: Learning blind video temporal consistency. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11219, pp. 179\u2013195. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01267-0_11"},{"key":"3_CR21","doi-asserted-by":"crossref","unstructured":"Lei, C., Ren, X., Zhang, Z., Chen, Q.: Blind video deflickering by neural filtering with a flawed atlas. In: CVPR, pp. 10439\u201310448 (2023)","DOI":"10.1109\/CVPR52729.2023.01006"},{"key":"3_CR22","unstructured":"Lei, C., Xing, Y., Chen, Q.: Blind video temporal consistency via deep video prior. In: NeurIPS, pp. 1083\u20131093 (2020)"},{"key":"3_CR23","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 
740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"3_CR24","doi-asserted-by":"crossref","unstructured":"Ma, L., Ma, T., Liu, R., Fan, X., Luo, Z.: Toward fast, flexible, and robust low-light image enhancement. In: CVPR, pp. 5637\u20135646 (2022)","DOI":"10.1109\/CVPR52688.2022.00555"},{"key":"3_CR25","doi-asserted-by":"crossref","unstructured":"Mei, K., Patel, V.: VIDM: video implicit diffusion models. In: AAAI, pp. 9117\u20139125 (2023)","DOI":"10.1609\/aaai.v37i8.26094"},{"key":"3_CR26","unstructured":"Moniz, J.R.A., Kang, E., P\u00f3czos, B.: LucidDream: controlled temporally-consistent deepDream on videos. arXiv preprint arXiv:1911.11960 (2019)"},{"key":"3_CR27","doi-asserted-by":"crossref","unstructured":"Park, K., Woo, S., Kim, D., Cho, D., Kweon, I.S.: Preserving semantic and temporal consistency for unpaired video-to-video translation. In: ACM MM, pp. 1248\u20131257 (2019)","DOI":"10.1145\/3343031.3350864"},{"key":"3_CR28","doi-asserted-by":"crossref","unstructured":"Perazzi, F., Pont-Tuset, J., McWilliams, B., Van\u00a0Gool, L., Gross, M., Sorkine-Hornung, A.: A benchmark dataset and evaluation methodology for video object segmentation. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.85"},{"key":"3_CR29","unstructured":"P\u00e9rez-Pellitero, E., Sajjadi, M.S., Hirsch, M., Sch\u00f6lkopf, B.: Perceptual video super resolution with enhanced temporal consistency. arXiv preprint arXiv:1807.07930 (2018)"},{"key":"3_CR30","doi-asserted-by":"crossref","unstructured":"Pfeuffer, A., Dietmayer, K.: Separable convolutional LSTMs for faster video segmentation. In: ITSC, pp. 1072\u20131078 (2019)","DOI":"10.1109\/ITSC.2019.8917487"},{"key":"3_CR31","unstructured":"Pitie, F., Kokaram, A., Dahyot, R.: Removing flicker from old movies. 
Master\u2019s thesis, University of Nice-Sophia Antipolis, France, September (2002)"},{"key":"3_CR32","unstructured":"Pont-Tuset, J., Perazzi, F., Caelles, S., Arbel\u00e1ez, P., Sorkine-Hornung, A., Van Gool, L.: The 2017 Davis challenge on video object segmentation. arXiv:1704.00675 (2017)"},{"key":"3_CR33","doi-asserted-by":"crossref","unstructured":"Qiu, X., Han, C., Zhang, Z., Li, B., Guo, T., Nie, X.: DiffBFR: bootstrapping diffusion model for blind face restoration. In: Proceedings of the 31st ACM International Conference on Multimedia, pp. 7785\u20137795 (2023)","DOI":"10.1145\/3581783.3611731"},{"key":"3_CR34","unstructured":"RE:VISION: De:flicker. https:\/\/revisionfx.com\/products\/deflicker\/"},{"key":"3_CR35","doi-asserted-by":"crossref","unstructured":"Saito, M., Saito, S., Koyama, M., Kobayashi, S.: Train sparsely, generate densely: memory-efficient unsupervised training of high-resolution temporal GAN. Int. J. Comput. Vision, 2586\u20132606 (2020)","DOI":"10.1007\/s11263-020-01333-y"},{"key":"3_CR36","doi-asserted-by":"crossref","unstructured":"Skorokhodov, I., Tulyakov, S., Elhoseiny, M.: StyleGAN-V: a continuous video generator with the price, image quality and perks of styleGAN2. In: CVPR, pp. 3626\u20133636 (2022)","DOI":"10.1109\/CVPR52688.2022.00361"},{"key":"3_CR37","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"402","DOI":"10.1007\/978-3-030-58536-5_24","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Z Teed","year":"2020","unstructured":"Teed, Z., Deng, J.: RAFT: recurrent all-pairs field transforms for optical flow. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12347, pp. 402\u2013419. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58536-5_24"},{"key":"3_CR38","doi-asserted-by":"crossref","unstructured":"Thasarathan, H., Nazeri, K., Ebrahimi, M.: Automatic temporally coherent video colorization. In: CRV, pp. 
189\u2013194 (2019)","DOI":"10.1109\/CRV.2019.00033"},{"key":"3_CR39","doi-asserted-by":"crossref","unstructured":"Thimonier, H., Despois, J., Kips, R., Perrot, M.: Learning long-term style preserving blind video temporal consistency. In: ICME, pp.\u00a01\u20136 (2021)","DOI":"10.1109\/ICME51207.2021.9428445"},{"key":"3_CR40","doi-asserted-by":"crossref","unstructured":"Wan, Z., Zhang, B., Chen, D., Liao, J.: Bringing old films back to life. In: CVPR, pp. 17694\u201317703 (2022)","DOI":"10.1109\/CVPR52688.2022.01717"},{"key":"3_CR41","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"343","DOI":"10.1007\/978-3-031-19797-0_20","volume-title":"Computer Vision - ECCV 2022","author":"H Wang","year":"2022","unstructured":"Wang, H., Xu, K., Lau, R.W.: Local color distributions prior for image enhancement. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13678, pp. 343\u2013359. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19797-0_20"},{"key":"3_CR42","unstructured":"Wang, W., et\u00a0al.: Magicvideo-v2: multi-stage high-aesthetic video generation. arXiv preprint arXiv:2401.04468 (2024)"},{"key":"3_CR43","doi-asserted-by":"crossref","unstructured":"Wang, X., Li, Y., Zhang, H., Shan, Y.: Towards real-world blind face restoration with generative facial prior. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9168\u20139178 (2021)","DOI":"10.1109\/CVPR46437.2021.00905"},{"key":"3_CR44","doi-asserted-by":"crossref","unstructured":"Wang, Y., Peng, L., Li, L., Cao, Y., Zha, Z.J.: Decoupling-and-aggregating for image exposure correction. In: CVPR, pp. 
18115\u201318124 (2023)","DOI":"10.1109\/CVPR52729.2023.01737"},{"key":"3_CR45","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13, 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"3_CR46","unstructured":"Wu, J.Z., et al.: Tune-a-video: one-shot tuning of image diffusion models for text-to-video generation. In: ICCV, vol.\u00a039, pp. 7623\u20137633 (2023)"},{"key":"3_CR47","doi-asserted-by":"crossref","unstructured":"Xu, R., et al.: Pik-fix: restoring and colorizing old photos. In: WACV, pp. 1724\u20131734 (2023)","DOI":"10.1109\/WACV56688.2023.00177"},{"key":"3_CR48","doi-asserted-by":"publisher","first-page":"7153","DOI":"10.1109\/TIP.2020.2999209","volume":"29","author":"X Xu","year":"2020","unstructured":"Xu, X., Li, M., Sun, W., Yang, M.H.: Learning spatial and spatio-temporal pixel aggregations for image and video denoising. IEEE Trans. Image Process. 29, 7153\u20137165 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"3_CR49","doi-asserted-by":"crossref","unstructured":"Yang, T., Ren, P., Xie, X., Zhang, L.: Gan prior embedded network for blind face restoration in the wild. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 672\u2013681 (2021)","DOI":"10.1109\/CVPR46437.2021.00073"},{"key":"3_CR50","doi-asserted-by":"crossref","unstructured":"Zhan, X., Pan, X., Liu, Z., Lin, D., Loy, C.C.: Self-supervised learning via conditional motion propagation. In: CVPR, pp. 1881\u20131889 (2019)","DOI":"10.1109\/CVPR.2019.00198"},{"key":"3_CR51","unstructured":"Zhang, F., Shao, Y., Sun, Y., Zhu, K., Gao, C., Sang, N.: Unsupervised low-light image enhancement via histogram equalization prior. 
arXiv preprint arXiv:2112.01766 (2021)"},{"key":"3_CR52","unstructured":"Zhang, Z., Li, B., Nie, X., Han, C., Guo, T., Liu, L.: Towards consistent video editing with text-to-image diffusion models. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"3_CR53","doi-asserted-by":"crossref","unstructured":"Zhou, Y., Xu, X., Shen, F., Gao, L., Lu, H., Shen, H.T.: Temporal denoising mask synthesis network for learning blind video temporal consistency. In: ACM MM, pp. 475\u2013483 (2020)","DOI":"10.1145\/3394171.3413788"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72643-9_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T21:24:36Z","timestamp":1732224276000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72643-9_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,22]]},"ISBN":["9783031726422","9783031726439"],"references-count":53,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72643-9_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,11,22]]},"assertion":[{"value":"22 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}