{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,11]],"date-time":"2025-10-11T00:24:18Z","timestamp":1760142258200,"version":"build-2065373602"},"reference-count":56,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2025,7,15]],"date-time":"2025-07-15T00:00:00Z","timestamp":1752537600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,7,15]],"date-time":"2025-07-15T00:00:00Z","timestamp":1752537600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/100014718","name":"Innovative Research Group Project of the National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62272383"],"award-info":[{"award-number":["62272383"]}],"id":[{"id":"10.13039\/100014718","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100014718","name":"Innovative Research Group Project of the National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62371389, 62031023"],"award-info":[{"award-number":["62371389, 62031023"]}],"id":[{"id":"10.13039\/100014718","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1007\/s11263-025-02528-x","type":"journal-article","created":{"date-parts":[[2025,7,15]],"date-time":"2025-07-15T10:05:44Z","timestamp":1752573944000},"page":"7222-7241","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["CEDFlow++: Latent Contour Enhancement for Dark Optical Flow 
Estimation"],"prefix":"10.1007","volume":"133","author":[{"given":"Fengyuan","family":"Zuo","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3742-4029","authenticated-orcid":false,"given":"Haiyan","family":"Jin","sequence":"additional","affiliation":[]},{"given":"Zhaolin","family":"Xiao","sequence":"additional","affiliation":[]},{"given":"Haonan","family":"Su","sequence":"additional","affiliation":[]},{"given":"Meng","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,7,15]]},"reference":[{"issue":"11","key":"2528_CR1","doi-asserted-by":"publisher","first-page":"2757","DOI":"10.1007\/s11263-023-01828-4","volume":"131","author":"B Bayramli","year":"2023","unstructured":"Bayramli, B., Hur, J., & Lu, H. (2023). Raft-msf: Self-supervised monocular scene flow using recurrent optimizer. IJCV, 131(11), 2757\u20132769.","journal-title":"IJCV"},{"key":"2528_CR2","doi-asserted-by":"crossref","unstructured":"Butler, D.J., Wulff, J., Stanley, G.B., & Black, M.J. (2012). A naturalistic open source movie for optical flow evaluation. In: ECCV, pp. 611\u2013625 Springer","DOI":"10.1007\/978-3-642-33783-3_44"},{"key":"2528_CR3","doi-asserted-by":"crossref","unstructured":"Cai, Y., Bian, H., Lin, J., Wang, H., Timofte, R., & Zhang, Y. (2023). Retinexformer: One-stage retinex-based transformer for low-light image enhancement. In: ICCV, pp. 12504\u201312513","DOI":"10.1109\/ICCV51070.2023.01149"},{"key":"2528_CR4","doi-asserted-by":"crossref","unstructured":"Cai, B., Xu, X., Guo, K., Jia, K., Hu, B., & Tao, D. (2017). A joint intrinsic-extrinsic prior model for retinex. In: ICCV, pp. 4000\u20134009","DOI":"10.1109\/ICCV.2017.431"},{"key":"2528_CR5","doi-asserted-by":"crossref","unstructured":"Cao, B., Sun, Y., Zhu, P., & Hu, Q. (2023). Multi-modal gated mixture of local-to-global experts for dynamic image fusion. In: ICCV, pp. 
23555\u201323564","DOI":"10.1109\/ICCV51070.2023.02153"},{"issue":"12","key":"2528_CR6","doi-asserted-by":"publisher","first-page":"3054","DOI":"10.1007\/s11263-022-01652-2","volume":"130","author":"C Chi","year":"2022","unstructured":"Chi, C., Hao, T., Wang, Q., Guo, P., & Yang, X. (2022). Subspace-pnp: A geometric constraint loss for mutual assistance of depth and optical flow estimation. IJCV, 130(12), 3054\u20133069.","journal-title":"IJCV"},{"key":"2528_CR7","doi-asserted-by":"crossref","unstructured":"Chobola, T., Liu, Y., Zhang, H., Schnabel, J.A., & Peng, T. (2024). Fast context-based low-light image enhancement via neural implicit representations. In: ECCV, pp. 413\u2013430 . Springer","DOI":"10.1007\/978-3-031-73016-0_24"},{"key":"2528_CR8","doi-asserted-by":"publisher","first-page":"1371","DOI":"10.1609\/aaai.v38i2.27901","volume":"38","author":"MV Conde","year":"2024","unstructured":"Conde, M. V., Vazquez-Corral, J., Brown, M. S., & Timofte, R. (2024). Nilut: Conditional neural implicit 3d lookup tables for image enhancement. AAAI, 38, 1371\u20131379.","journal-title":"AAAI"},{"key":"2528_CR9","doi-asserted-by":"crossref","unstructured":"Dong, Q., Cao, C., & Fu, Y. (2023). Rethinking optical flow from geometric matching consistent perspective. In: CVPR, pp. 1337\u20131347","DOI":"10.1109\/CVPR52729.2023.00135"},{"key":"2528_CR10","doi-asserted-by":"crossref","unstructured":"Dosovitskiy, A., Fischer, P., Ilg, E., Hausser, P., Hazirbas, C., Golkov, V., Van Der\u00a0Smagt, P., Cremers, D., & Brox, T. (2015). Flownet: Learning optical flow with convolutional networks. In: ICCV, pp. 2758\u20132766","DOI":"10.1109\/ICCV.2015.316"},{"key":"2528_CR11","doi-asserted-by":"crossref","unstructured":"Geiger, A., Lenz, P., & Urtasun, R. (2012). Are we ready for autonomous driving? the kitti vision benchmark suite. In: CVPR, pp. 
3354\u20133361 IEEE","DOI":"10.1109\/CVPR.2012.6248074"},{"issue":"2","key":"2528_CR12","first-page":"982","volume":"26","author":"X Guo","year":"2016","unstructured":"Guo, X., Li, Y., & Ling, H. (2016). Lime: Low-light image enhancement via illumination map estimation. IEEE TIP, 26(2), 982\u2013993.","journal-title":"IEEE TIP"},{"key":"2528_CR13","doi-asserted-by":"crossref","unstructured":"Huang, Z., Shi, X., Zhang, C., Wang, Q., Cheung, K.C., Qin, H., Dai, J., & Li, H. (2022). Flowformer: A transformer architecture for optical flow. In: ECCV, pp. 668\u2013685 . Springer","DOI":"10.1007\/978-3-031-19790-1_40"},{"key":"2528_CR14","doi-asserted-by":"crossref","unstructured":"Ilg, E., Mayer, N., Saikia, T., Keuper, M., Dosovitskiy, A., & Brox, T. (2017). Flownet 2.0: Evolution of optical flow estimation with deep networks. In: CVPR, pp. 2462\u20132470","DOI":"10.1109\/CVPR.2017.179"},{"key":"2528_CR15","doi-asserted-by":"crossref","unstructured":"Jiang, S., Campbell, D., Lu, Y., Li, H., & Hartley, R. (2021). Learning to estimate hidden motions with global motion aggregation. In: ICCV, pp. 9772\u20139781","DOI":"10.1109\/ICCV48922.2021.00963"},{"key":"2528_CR16","doi-asserted-by":"crossref","unstructured":"Jiang, S., Lu, Y., Li, H., & Hartley, R. (2021). Learning optical flow from a few matches. In: CVPR, pp. 16592\u201316600","DOI":"10.1109\/CVPR46437.2021.01632"},{"key":"2528_CR17","doi-asserted-by":"crossref","unstructured":"Li, H., Luo, K., & Liu, S. (2021). Gyroflow: Gyroscope-guided unsupervised optical flow learning. In: ICCV, pp. 12869\u201312878","DOI":"10.1109\/ICCV48922.2021.01263"},{"issue":"6","key":"2528_CR18","doi-asserted-by":"publisher","first-page":"2331","DOI":"10.1007\/s11263-023-01978-5","volume":"132","author":"H Li","year":"2024","unstructured":"Li, H., Luo, K., Zeng, B., & Liu, S. (2024). Gyroflow+: Gyroscope-guided unsupervised deep homography and optical flow learning. 
IJCV, 132(6), 2331\u20132349.","journal-title":"IJCV"},{"issue":"4","key":"2528_CR19","doi-asserted-by":"publisher","first-page":"1055","DOI":"10.1007\/s11263-023-01920-9","volume":"132","author":"Z Lin","year":"2024","unstructured":"Lin, Z., Liang, T., Xiao, T., Wang, Y., & Yang, M.-H. (2024). Flownas: neural architecture search for optical flow estimation. IJCV, 132(4), 1055\u20131074.","journal-title":"IJCV"},{"key":"2528_CR20","unstructured":"Loshchilov, I., Hutter, F., et al. (2017). Fixing weight decay regularization in adam. arXiv preprint arXiv:1711.05101 5, 5"},{"key":"2528_CR21","doi-asserted-by":"crossref","unstructured":"Luo, A., Li, X., Yang, F., Liu, J., Fan, H., & Liu, S. (2024). Flowdiffuser: Advancing optical flow estimation with diffusion models. In: CVPR, pp. 19167\u201319176","DOI":"10.1109\/CVPR52733.2024.01813"},{"key":"2528_CR22","doi-asserted-by":"crossref","unstructured":"Luo, K., Wang, C., Liu, S., Fan, H., Wang, J., & Sun, J. (2021). Upflow: Upsampling pyramid for unsupervised optical flow learning. In: CVPR, pp. 1045\u20131054","DOI":"10.1109\/CVPR46437.2021.00110"},{"key":"2528_CR23","doi-asserted-by":"crossref","unstructured":"Luo, A., Yang, F., Li, X., & Liu, S. (2022). Learning optical flow with kernel patch attention. In: CVPR, pp. 8906\u20138915","DOI":"10.1109\/CVPR52688.2022.00870"},{"key":"2528_CR24","doi-asserted-by":"crossref","unstructured":"Luo, A., Yang, F., Li, X., Nie, L., Lin, C., Fan, H., & Liu, S. (2023). Gaflow: Incorporating gaussian attention into optical flow. In: ICCV, pp. 9642\u20139651","DOI":"10.1109\/ICCV51070.2023.00884"},{"key":"2528_CR25","doi-asserted-by":"publisher","first-page":"1890","DOI":"10.1609\/aaai.v36i2.20083","volume":"36","author":"A Luo","year":"2022","unstructured":"Luo, A., Yang, F., Luo, K., Li, X., Fan, H., & Liu, S. (2022). Learning optical flow with adaptive graph reasoning. 
AAAI, 36, 1890\u20131898.","journal-title":"AAAI"},{"key":"2528_CR26","doi-asserted-by":"crossref","unstructured":"Menze, M., & Geiger, A. (2015). Object scene flow for autonomous vehicles. In: CVPR, pp. 3061\u20133070","DOI":"10.1109\/CVPR.2015.7298925"},{"key":"2528_CR27","doi-asserted-by":"publisher","first-page":"427","DOI":"10.5194\/isprsannals-II-3-W5-427-2015","volume":"2","author":"M Menze","year":"2015","unstructured":"Menze, M., Heipke, C., & Geiger, A. (2015). Joint 3d estimation of vehicles and scene flow. ISPRS annals of the photogrammetry, remote sensing and spatial information sciences, 2, 427\u2013434.","journal-title":"ISPRS annals of the photogrammetry, remote sensing and spatial information sciences"},{"key":"2528_CR28","unstructured":"Paszke, A., Gross, S., Chintala, S., Chanan, G., Yang, E., DeVito, Z., Lin, Z., Desmaison, A., Antiga, L., & Lerer, A. (2017). Automatic differentiation in pytorch"},{"key":"2528_CR29","doi-asserted-by":"crossref","unstructured":"Ren, S., Zhou, D., He, S., Feng, J., & Wang, X. (2022). Shunted self-attention via multi-scale token aggregation. In: CVPR, pp. 10853\u201310862","DOI":"10.1109\/CVPR52688.2022.01058"},{"key":"2528_CR30","first-page":"9113","volume":"29","author":"Z Ren","year":"2020","unstructured":"Ren, Z., Luo, W., Yan, J., Liao, W., Yang, X., Yuille, A., & Zha, H. (2020). Stflow: Self-taught optical flow estimation using pseudo labels. IEEE TIP, 29, 9113\u20139124.","journal-title":"IEEE TIP"},{"key":"2528_CR31","doi-asserted-by":"crossref","unstructured":"Shi, X., Huang, Z., Li, D., Zhang, M., Cheung, K.C., See, S., Qin, H., Dai, J., & Li, H. (2023). Flowformer++: Masked cost volume autoencoding for pretraining optical flow estimation. In: CVPR, pp. 1599\u20131610","DOI":"10.1109\/CVPR52729.2023.00160"},{"key":"2528_CR32","doi-asserted-by":"crossref","unstructured":"Shi, Y., Liu, D., Zhang, L., Tian, Y., Xia, X., & Fu, X. (2024). 
Zero-ig: zero-shot illumination-guided joint denoising and adaptive enhancement for low-light images. In: CVPR, pp. 3015\u20133024","DOI":"10.1109\/CVPR52733.2024.00291"},{"key":"2528_CR33","doi-asserted-by":"crossref","unstructured":"Smith, L.N., & Topin, N. (2019). Super-convergence: Very fast training of neural networks using large learning rates. In: Artificial Intelligence and Machine Learning for Multi-domain Operations Applications, vol. 11006, pp. 369\u2013386 SPIE","DOI":"10.1117\/12.2520589"},{"key":"2528_CR34","doi-asserted-by":"crossref","unstructured":"Sui, X., Li, S., Geng, X., Wu, Y., Xu, X., Liu, Y., Goh, R., & Zhu, H. (2022). Craft: Cross-attentional flow transformer for robust optical flow. In: CVPR, pp. 17602\u201317611","DOI":"10.1109\/CVPR52688.2022.01708"},{"key":"2528_CR35","doi-asserted-by":"crossref","unstructured":"Sun, D., Yang, X., Liu, M.-Y., & Kautz, J. (2018). Pwc-net: Cnns for optical flow using pyramid, warping, and cost volume. In: CVPR, pp. 8934\u20138943","DOI":"10.1109\/CVPR.2018.00931"},{"key":"2528_CR36","first-page":"11313","volume":"35","author":"S Sun","year":"2022","unstructured":"Sun, S., Chen, Y., Zhu, Y., Guo, G., & Li, G. (2022). Skflow: Learning optical flow with super kernels. NeurIPS, 35, 11313\u201311326.","journal-title":"NeurIPS"},{"key":"2528_CR37","doi-asserted-by":"crossref","unstructured":"Teed, Z., & Deng, J. (2020). Raft: Recurrent all-pairs field transforms for optical flow. In: ECCV, pp. 402\u2013419 Springer","DOI":"10.1007\/978-3-030-58536-5_24"},{"key":"2528_CR38","doi-asserted-by":"crossref","unstructured":"Wang, R., Xu, X., Fu, C.-W., Lu, J., Yu, B., & Jia, J. (2021). Seeing dynamic scene in the dark: A high-quality video dataset with mechatronic alignment. In: ICCV, pp. 9700\u20139709","DOI":"10.1109\/ICCV48922.2021.00956"},{"key":"2528_CR39","doi-asserted-by":"crossref","unstructured":"Wang, W., Yang, H., Fu, J., & Liu, J. (2024). Zero-reference low-light enhancement via physical quadruple priors. 
In: CVPR, pp. 26057\u201326066","DOI":"10.1109\/CVPR52733.2024.02462"},{"key":"2528_CR40","doi-asserted-by":"crossref","unstructured":"Wang, Y., Yu, Y., Yang, W., Guo, L., Chau, L.-P., Kot, A.C., & Wen, B. (2023). Exposurediffusion: Learning to expose for low-light image enhancement. In: ICCV, pp. 12438\u201312448","DOI":"10.1109\/ICCV51070.2023.01143"},{"issue":"1","key":"2528_CR41","doi-asserted-by":"publisher","first-page":"1250","DOI":"10.1109\/TPAMI.2022.3152562","volume":"45","author":"W Wang","year":"2022","unstructured":"Wang, W., Wang, X., Yang, W., & Liu, J. (2022). Unsupervised face detection in the dark. IEEE TPAMI, 45(1), 1250\u20131266.","journal-title":"IEEE TPAMI"},{"key":"2528_CR42","doi-asserted-by":"crossref","unstructured":"Wang, Y., Wan, R., Yang, W., Li, H., Chau, L.-P., & Kot, A. (2022). Low-light image enhancement with normalizing flow. AAAI,36, 2604\u20132612.","DOI":"10.1609\/aaai.v36i3.20162"},{"issue":"8","key":"2528_CR43","doi-asserted-by":"publisher","first-page":"3023","DOI":"10.1007\/s11263-024-01993-0","volume":"132","author":"B Wang","year":"2024","unstructured":"Wang, B., Zhang, Y., Li, J., Yu, Y., Sun, Z., Liu, L., & Hu, D. (2024). Splatflow: Learning multi-frame optical flow via splatting. IJCV, 132(8), 3023\u20133045.","journal-title":"IJCV"},{"key":"2528_CR44","unstructured":"Wei, C., Wang, W., Yang, W., & Liu, J. (2018). Deep retinex decomposition for low-light enhancement. arXiv preprint arXiv:1808.04560"},{"key":"2528_CR45","doi-asserted-by":"crossref","unstructured":"Xu, X., Wang, R., & Lu, J. (2023). Low-light image enhancement via structure modeling and guidance. In: CVPR, pp. 9893\u20139903","DOI":"10.1109\/CVPR52729.2023.00954"},{"key":"2528_CR46","doi-asserted-by":"crossref","unstructured":"Xu, X., Wang, R., Fu, C.-W., & Jia, J. (2022). Snr-aware low-light image enhancement. In: CVPR, pp. 
17714\u201317724","DOI":"10.1109\/CVPR52688.2022.01719"},{"key":"2528_CR47","doi-asserted-by":"crossref","unstructured":"Xu, H., Yang, J., Cai, J., Zhang, J., & Tong, X. (2021). High-resolution optical flow from 1d attention and correlation. In: ICCV, pp. 10498\u201310507","DOI":"10.1109\/ICCV48922.2021.01033"},{"key":"2528_CR48","doi-asserted-by":"crossref","unstructured":"Xu, H., Zhang, J., Cai, J., Rezatofighi, H., & Tao, D. (2022). Gmflow: Learning optical flow via global matching. In: CVPR, pp. 8121\u20138130","DOI":"10.1109\/CVPR52688.2022.00795"},{"key":"2528_CR49","first-page":"3970","volume":"29","author":"SI Young","year":"2019","unstructured":"Young, S. I., Naman, A. T., & Taubman, D. (2019). Graph laplacian regularization for robust optical flow estimation. IEEE TIP, 29, 3970\u20133983.","journal-title":"IEEE TIP"},{"key":"2528_CR50","doi-asserted-by":"crossref","unstructured":"Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., & Yang, M.-H. (2022). Restormer: Efficient transformer for high-resolution image restoration. In: CVPR, pp. 5728\u20135739","DOI":"10.1109\/CVPR52688.2022.00564"},{"key":"2528_CR51","doi-asserted-by":"crossref","unstructured":"Zhang, F., Li, Y., You, S., & Fu, Y. (2021). Learning temporal consistency for low light video enhancement from single images. In: CVPR, pp. 4967\u20134976","DOI":"10.1109\/CVPR46437.2021.00493"},{"key":"2528_CR52","doi-asserted-by":"crossref","unstructured":"Zhao, S., Zhao, L., Zhang, Z., Zhou, E., & Metaxas, D. (2022). Global matching with overlapping attention for optical flow estimation. In: CVPR, pp. 17592\u201317601","DOI":"10.1109\/CVPR52688.2022.01707"},{"key":"2528_CR53","doi-asserted-by":"crossref","unstructured":"Zheng, Y., Lv, F., & Zhang,\u00a0M. (2022) Optical flow in the dark. IEEE TPAMI 44(12), 9464\u20139476","DOI":"10.1109\/TPAMI.2021.3130302"},{"key":"2528_CR54","doi-asserted-by":"crossref","unstructured":"Zheng, Y., Lv, F., & Zhang,\u00a0M. (2020) Optical flow in the dark. 
In: CVPR, pp. 6749\u20136757","DOI":"10.1109\/CVPR42600.2020.00678"},{"key":"2528_CR55","unstructured":"Zhou, H., Chang, Y., Liu, H., Yan, W., Duan, Y., Shi, Z., & Yan, L. (2024). Exploring the common appearance-boundary adaptation for nighttime optical flow. arXiv preprint arXiv:2401.17642"},{"key":"2528_CR56","doi-asserted-by":"publisher","first-page":"7909","DOI":"10.1609\/aaai.v38i7.28627","volume":"38","author":"F Zuo","year":"2024","unstructured":"Zuo, F., Xiao, Z., Jin, H., & Su, H. (2024). Cedflow: latent contour enhancement for dark optical flow estimation. AAAI, 38, 7909\u20137916.","journal-title":"AAAI"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02528-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-025-02528-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02528-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T08:49:42Z","timestamp":1760086182000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-025-02528-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,15]]},"references-count":56,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2025,10]]}},"alternative-id":["2528"],"URL":"https:\/\/doi.org\/10.1007\/s11263-025-02528-x","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"type":"print","value":"0920-5691"},{"type":"electronic","value":"1573-1405"}],"subject":[],"published":{"date-parts":[[2025,7,15]]},"assertion":[{"value":"7 June 
2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 July 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 July 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}