{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T03:22:50Z","timestamp":1775704970939,"version":"3.50.1"},"reference-count":107,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2024,8,12]],"date-time":"2024-08-12T00:00:00Z","timestamp":1723420800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,8,12]],"date-time":"2024-08-12T00:00:00Z","timestamp":1723420800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62106129"],"award-info":[{"award-number":["62106129"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100001809","name":"Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62176139"],"award-info":[{"award-number":["62176139"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100001809","name":"Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62276155"],"award-info":[{"award-number":["62276155"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100007129","name":"Natural Science Foundation of Shandong Province","doi-asserted-by":"publisher","award":["ZR2021QF053"],"award-info":[{"award-number":["ZR2021QF053"]}],"id":[{"id":"10.13039\/501100007129","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100007129","name":"Natural Science Foundation of Shandong 
Province","doi-asserted-by":"publisher","award":["ZR2021ZD15"],"award-info":[{"award-number":["ZR2021ZD15"]}],"id":[{"id":"10.13039\/501100007129","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2025,2]]},"DOI":"10.1007\/s11263-024-02205-5","type":"journal-article","created":{"date-parts":[[2024,8,12]],"date-time":"2024-08-12T05:02:07Z","timestamp":1723438927000},"page":"652-671","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["Variational Rectification Inference for Learning with Noisy Labels"],"prefix":"10.1007","volume":"133","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7715-5682","authenticated-orcid":false,"given":"Haoliang","family":"Sun","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4073-7598","authenticated-orcid":false,"given":"Qi","family":"Wei","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2839-5799","authenticated-orcid":false,"given":"Lei","family":"Feng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5653-8286","authenticated-orcid":false,"given":"Yupeng","family":"Hu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4547-3982","authenticated-orcid":false,"given":"Fan","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9572-2345","authenticated-orcid":false,"given":"Hehe","family":"Fan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8465-1294","authenticated-orcid":false,"given":"Yilong","family":"Yin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,8,12]]},"reference":[{"key":"2205_CR1","unstructured":"Arazo, E., Ortego, D., Albert, P., 
et\u00a0al. (2019). Unsupervised label noise modeling and loss correction. In: ICML"},{"key":"2205_CR2","unstructured":"Arpit, D., Jastrzkebski, S., Ballas, N., et\u00a0al. (2017). A closer look at memorization in deep networks. In: ICML"},{"key":"2205_CR3","doi-asserted-by":"crossref","unstructured":"Bai, Y., & Liu, T. (2021). Me-momentum: Extracting hard confident examples from noisily labeled data. In: ICCV","DOI":"10.1109\/ICCV48922.2021.00918"},{"key":"2205_CR4","unstructured":"Bai, Y., Yang, E., Han, B., et\u00a0al. (2021). Understanding and improving early stopping for learning with noisy labels. In: NeurIPS"},{"key":"2205_CR5","unstructured":"Bao, F., Wu, G., Li, C., et\u00a0al. (2021). Stability and generalization of bilevel programming in hyperparameter optimization. In: NeurIPS"},{"key":"2205_CR6","unstructured":"Berthelot, D., Carlini, N., Goodfellow, I., et\u00a0al. (2019). Mixmatch: A holistic approach to semi-supervised learning. NeurIPS"},{"key":"2205_CR7","doi-asserted-by":"crossref","unstructured":"Bossard, L., Guillaumin, M., Van\u00a0Gool, L. (2014). Food-101\u2013mining discriminative components with random forests. In: ECCV","DOI":"10.1007\/978-3-319-10599-4_29"},{"key":"2205_CR8","doi-asserted-by":"crossref","unstructured":"Chen, Y., Shen, X., Hu, S. X., et\u00a0al. (2021). Boosting co-teaching with compression regularization for label noise. In: CVPR","DOI":"10.1109\/CVPRW53098.2021.00302"},{"key":"2205_CR9","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3186930","author":"Y Chen","year":"2022","unstructured":"Chen, Y., Hu, S. X., Shen, X., et al. (2022). Compressing features for learning with noisy labels. IEEE Transactions on Neural Networks and Learning Systems. https:\/\/doi.org\/10.1109\/TNNLS.2022.3186930","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"2205_CR10","first-page":"11104","volume":"35","author":"D Cheng","year":"2022","unstructured":"Cheng, D., Ning, Y., Wang, N., et al. 
(2022). Class-dependent label-noise learning with cycle-consistency regularization. Advances in Neural Information Processing Systems, 35, 11104\u201311116.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2205_CR11","unstructured":"Cheng, H., Zhu, Z., Li, X., et\u00a0al. (2021). Learning with instance-dependent label noise: A sample sieve approach. In: ICLR"},{"key":"2205_CR12","doi-asserted-by":"crossref","unstructured":"Cubuk, E. D., Zoph, B., Shlens, J., et\u00a0al. (2020). Randaugment: Practical automated data augmentation with a reduced search space. In: CVPR workshops, pp. 702\u2013703","DOI":"10.1109\/CVPRW50498.2020.00359"},{"key":"2205_CR13","doi-asserted-by":"crossref","unstructured":"Cui, Y., Jia, M., Lin, T. Y., et\u00a0al. (2019). Class-balanced loss based on effective number of samples. In: CVPR","DOI":"10.1109\/CVPR.2019.00949"},{"key":"2205_CR14","unstructured":"Englesson, E. (2021). Generalized Jensen-Shannon divergence loss for learning with noisy labels. In: NeurIPS"},{"key":"2205_CR15","unstructured":"Fallah, A., Mokhtari, A., & Ozdaglar, A. (2020). On the convergence theory of gradient-based model-agnostic meta-learning algorithms. In: AISTATS"},{"key":"2205_CR16","unstructured":"Finn, C., Abbeel, P., & Levine, S. (2017). Model-agnostic meta-learning for fast adaptation of deep networks. In: ICML"},{"key":"2205_CR17","unstructured":"Franceschi, L., Frasconi, P., Salzo, S. et\u00a0al. (2018). Bilevel programming for hyperparameter optimization and meta-learning. In: ICML"},{"key":"2205_CR18","doi-asserted-by":"crossref","unstructured":"Fu, Z., Song, K., Zhou, L., et\u00a0al. (2024). Noise-aware image captioning with progressively exploring mismatched words. In: AAAI, pp. 12091\u201312099","DOI":"10.1609\/aaai.v38i11.29097"},{"key":"2205_CR19","doi-asserted-by":"crossref","unstructured":"Ghosh, A., Kumar, H., Sastry, P. (2017). Robust loss functions under label noise for deep neural networks. 
In: AAAI","DOI":"10.1609\/aaai.v31i1.10894"},{"key":"2205_CR20","unstructured":"Goldberger, J., & Ben-Reuven, E. (2017). Training deep neural-networks using a noise adaptation layer. In: ICLR"},{"key":"2205_CR21","doi-asserted-by":"crossref","unstructured":"Gudovskiy, D., Rigazio, L., Ishizaka, S., et\u00a0al. (2021). Autodo: Robust autoaugment for biased data with label noise via scalable probabilistic implicit differentiation. In: CVPR","DOI":"10.1109\/CVPR46437.2021.01633"},{"key":"2205_CR22","unstructured":"Han, B., Yao, J., Niu, G., et\u00a0al. (2018a). Masking: A new perspective of noisy supervision. In: NeurIPS"},{"key":"2205_CR23","unstructured":"Han, B., Yao, Q., Yu, X., et\u00a0al. (2018b). Co-teaching: Robust training of deep neural networks with extremely noisy labels. NeurIPS 31"},{"key":"2205_CR24","doi-asserted-by":"crossref","unstructured":"Han, J., Luo, P., & Wang, X. (2019). Deep self-learning from noisy labels. In: ICCV","DOI":"10.1109\/ICCV.2019.00524"},{"key":"2205_CR25","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., et\u00a0al. (2016). Deep residual learning for image recognition. In: CVPR","DOI":"10.1109\/CVPR.2016.90"},{"key":"2205_CR26","unstructured":"Hendrycks, D., Mazeika, M., Wilson, D., et\u00a0al. (2018). Using trusted data to train deep networks on labels corrupted by severe noise. In: NeurIPS"},{"key":"2205_CR27","unstructured":"Higgins, I., Matthey, L., Pal, A., et\u00a0al. (2017) beta-vae: Learning basic visual concepts with a constrained variational framework. In: ICLR"},{"issue":"9","key":"2205_CR28","first-page":"5149","volume":"44","author":"T Hospedales","year":"2022","unstructured":"Hospedales, T., Antoniou, A., Micaelli, P., et al. (2022). Meta-learning in neural networks: A survey. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(9), 5149\u20135169.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2205_CR29","doi-asserted-by":"crossref","unstructured":"Huang, H., Kang, H., Liu, S., et\u00a0al. (2023). Paddles: Phase-amplitude spectrum disentangled early stopping for learning with noisy labels. In: ICCV","DOI":"10.1109\/ICCV51070.2023.01533"},{"key":"2205_CR30","unstructured":"Iakovleva, E., Verbeek, J., & Alahari, K. (2020). Meta-learning with shared amortized variational inference. In: ICML"},{"key":"2205_CR31","doi-asserted-by":"crossref","unstructured":"Iscen, A., Valmadre, J., Arnab, A., et\u00a0al. (2022). Learning with neighbor consistency for noisy labels. In: CVPR","DOI":"10.1109\/CVPR52688.2022.00463"},{"key":"2205_CR32","unstructured":"Jiang, L., Zhou, Z., Leung, T., et\u00a0al. (2018). Mentornet: Learning data-driven curriculum for very deep neural networks on corrupted labels. In: ICML"},{"key":"2205_CR33","unstructured":"Kang, H., Liu, S., Huang, H., et\u00a0al. (2023). Unleashing the potential of regularization strategies in learning with noisy labels. arXiv preprint arXiv:2307.05025"},{"key":"2205_CR34","doi-asserted-by":"crossref","unstructured":"Kim, Y., Yun, J., Shon, H., et\u00a0al. (2021). Joint negative and positive learning for noisy labels. In: CVPR","DOI":"10.1109\/CVPR46437.2021.00932"},{"key":"2205_CR35","unstructured":"Kingma, D. P., & Welling, M. (2014). Auto-encoding variational bayes. In: ICLR"},{"key":"2205_CR36","unstructured":"Krizhevsky, A., Hinton, G., et\u00a0al. (2009). Learning multiple layers of features from tiny images"},{"key":"2205_CR37","unstructured":"Kumar, M. P., Packer, B., Koller, D. (2010). Self-paced learning for latent variable models. In: NeurIPS"},{"key":"2205_CR38","doi-asserted-by":"crossref","unstructured":"Kye, S. M., Choi, K., Yi, J., et\u00a0al. (2022). 
Learning with noisy labels by efficient transition matrix estimation to combat label miscorrection. In: ECCV, Springer, pp. 717\u2013738","DOI":"10.1007\/978-3-031-19806-9_41"},{"key":"2205_CR39","doi-asserted-by":"crossref","unstructured":"Lee, K. H., He, X., Zhang, L., et\u00a0al. (2018). Cleannet: Transfer learning for scalable image classifier training with label noise. In: CVPR","DOI":"10.1109\/CVPR.2018.00571"},{"key":"2205_CR40","doi-asserted-by":"crossref","unstructured":"Li, J., Wong, Y., Zhao, Q., et\u00a0al. (2019). Learning to learn from noisy labeled data. In: CVPR","DOI":"10.1109\/CVPR.2019.00519"},{"key":"2205_CR41","unstructured":"Li, J., Socher, R. & Hoi, S. C. (2020). Dividemix: Learning with noisy labels as semi-supervised learning. In: ICLR"},{"key":"2205_CR42","unstructured":"Li, J., Xiong, C., & Hoi, S. (2021). Mopro: Webly supervised learning with momentum prototypes. In: ICLR"},{"key":"2205_CR43","doi-asserted-by":"crossref","unstructured":"Li, S., Xia, X., Ge, S., et\u00a0al. (2022a). Selective-supervised contrastive learning with noisy labels. In: CVPR","DOI":"10.1109\/CVPR52688.2022.00041"},{"key":"2205_CR44","first-page":"24184","volume":"35","author":"S Li","year":"2022","unstructured":"Li, S., Xia, X., Zhang, H., et al. (2022). Estimating noise transition matrix with label correlations for noisy multi-label learning. Advances in Neural Information Processing Systems, 35, 24184\u201324198.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2205_CR45","doi-asserted-by":"publisher","first-page":"103877","DOI":"10.1016\/j.artint.2023.103877","volume":"317","author":"H Liu","year":"2023","unstructured":"Liu, H., Zhong, Z., Sebe, N., et al. (2023). Mitigating robust overfitting via self-residual-calibration regularization. Artificial Intelligence, 317, 103877.","journal-title":"Artificial Intelligence"},{"key":"2205_CR46","unstructured":"Liu, S., Niles-Weed, J., Razavian, N., et\u00a0al. (2020). 
Early-learning regularization prevents memorization of noisy labels. In: NeurIPS"},{"key":"2205_CR47","unstructured":"Liu, S., Zhu, Z., Qu, Q., et\u00a0al. (2022). Robust training under label noise by over-parameterization. In: ICML"},{"issue":"3","key":"2205_CR48","doi-asserted-by":"publisher","first-page":"447","DOI":"10.1109\/TPAMI.2015.2456899","volume":"38","author":"T Liu","year":"2015","unstructured":"Liu, T., & Tao, D. (2015). Classification with noisy labels by importance reweighting. IEEE Transactions on Pattern Analysis and Machine Intelligence, 38(3), 447\u2013461.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2205_CR49","unstructured":"Liu, Y., & Guo, H. (2020). Peer loss functions: Learning from noisy labels without knowing noise rates. In: ICML"},{"key":"2205_CR50","doi-asserted-by":"crossref","unstructured":"Ma, X., Wang, Y., Houle, M. E., et\u00a0al. (2018). Dimensionality-driven learning with noisy labels. In: ICML","DOI":"10.1109\/CVPR.2018.00906"},{"key":"2205_CR51","unstructured":"Malach, E., & Shalev-Shwartz, S. (2017). Decoupling \"when to update\" from \"how to update\". NeurIPS 30"},{"key":"2205_CR52","volume-title":"Probabilistic machine learning: Advanced topics","author":"KP Murphy","year":"2023","unstructured":"Murphy, K. P. (2023). Probabilistic machine learning: Advanced topics. MIT Press."},{"key":"2205_CR53","doi-asserted-by":"crossref","unstructured":"Nishi, K., Ding, Y., Rich, A., et\u00a0al. (2021). Augmentation strategies for learning with noisy labels. In: CVPR","DOI":"10.1109\/CVPR46437.2021.00793"},{"key":"2205_CR54","doi-asserted-by":"crossref","unstructured":"Ortego, D., Arazo, E., Albert, P., et\u00a0al. (2021). Multi-objective interpolation training for robustness to label noise. In: CVPR","DOI":"10.1109\/CVPR46437.2021.00654"},{"key":"2205_CR55","unstructured":"Pereyra, G., Tucker, G., Chorowski, J., et\u00a0al. (2017). 
Regularizing neural networks by penalizing confident output distributions. arXiv preprint arXiv:1701.06548"},{"key":"2205_CR56","doi-asserted-by":"publisher","first-page":"13567","DOI":"10.1109\/TPAMI.2023.3297058","volume":"45","author":"N Pu","year":"2023","unstructured":"Pu, N., Zhong, Z., Sebe, N., et al. (2023). A memorizing and generalizing framework for lifelong person re-identification. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45, 13567\u201313585.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2205_CR57","unstructured":"Reed, S., Lee, H., Anguelov, D., et\u00a0al. (2015). Training deep neural networks on noisy labels with bootstrapping. In: ICLR"},{"key":"2205_CR58","unstructured":"Ren, M., Zeng, W., Yang, B., et\u00a0al. (2018). Learning to reweight examples for robust deep learning. In: ICML"},{"key":"2205_CR59","doi-asserted-by":"crossref","unstructured":"Sharma, K., Donmez, P., Luo, E., et\u00a0al. (2020). Noiserank: Unsupervised label noise reduction with dependence models. In: ECCV","DOI":"10.1007\/978-3-030-58583-9_44"},{"key":"2205_CR60","unstructured":"Shen, Y., & Sanghavi, S. (2019). Learning with bad training data via iterative trimmed loss minimization. In: ICML"},{"issue":"11","key":"2205_CR61","doi-asserted-by":"publisher","first-page":"1614","DOI":"10.1007\/s11263-019-01166-4","volume":"127","author":"Y Shen","year":"2019","unstructured":"Shen, Y., Liu, L., & Shao, L. (2019). Unsupervised binary representation learning with deep variational networks. International Journal of Computer Vision, 127(11), 1614\u20131628.","journal-title":"International Journal of Computer Vision"},{"key":"2205_CR62","unstructured":"Shu, J., Xie, Q., Yi, L., et\u00a0al. (2019). Meta-weight-net: Learning an explicit mapping for sample weighting. 
In: NeurIPS"},{"issue":"10","key":"2205_CR63","doi-asserted-by":"publisher","first-page":"11521","DOI":"10.1109\/TPAMI.2023.3271451","volume":"45","author":"J Shu","year":"2023","unstructured":"Shu, J., Yuan, X., Meng, D., et al. (2023). Cmw-net: Learning a class-aware sample weighting mapping for robust deep learning. IEEE Transaction on Pattern Analysis and Machine Intelligence, 45(10), 11521\u201311539.","journal-title":"IEEE Transaction on Pattern Analysis and Machine Intelligence"},{"key":"2205_CR64","unstructured":"Sohn, K., Berthelot, D., Carlini, N., et\u00a0al. (2020). Fixmatch: Simplifying semi-supervised learning with consistency and confidence. NeurIPS"},{"key":"2205_CR65","unstructured":"Song, H., Kim, M., & Lee, J. G. (2019). Selfie: Refurbishing unclean samples for robust deep learning. In: ICML"},{"key":"2205_CR66","unstructured":"Sukhbaatar, S., Bruna, J., Paluri, M., et\u00a0al. (2015). Training convolutional networks with noisy labels. In: ICLR"},{"key":"2205_CR67","doi-asserted-by":"publisher","first-page":"108467","DOI":"10.1016\/j.patcog.2021.108467","volume":"124","author":"H Sun","year":"2022","unstructured":"Sun, H., Guo, C., Wei, Q., et al. (2022). Learning to rectify for robust learning with noisy labels. Pattern Recognition, 124, 108467.","journal-title":"Pattern Recognition"},{"key":"2205_CR68","doi-asserted-by":"crossref","unstructured":"Sun, Z., Shen, F., Huang, D., et\u00a0al. (2022b). Pnp: Robust learning from noisy labels by probabilistic noise prediction. In: CVPR, pp. 5311\u20135320","DOI":"10.1109\/CVPR52688.2022.00524"},{"key":"2205_CR69","doi-asserted-by":"crossref","unstructured":"Tanno, R., Saeedi, A., Sankaranarayanan, S., et\u00a0al. (2019). Learning from noisy labels by regularized estimation of annotator confusion. In: CVPR","DOI":"10.1109\/CVPR.2019.01150"},{"key":"2205_CR70","doi-asserted-by":"crossref","unstructured":"Taraday, M. K., & Baskin, C. (2023). Enhanced meta label correction for coping with label corruption. 
In: ICCV, pp. 16295\u201316304","DOI":"10.1109\/ICCV51070.2023.01493"},{"key":"2205_CR71","unstructured":"Vahdat, A. (2017). Toward robustness against label noise in training deep discriminative neural networks. In: NeurIPS"},{"key":"2205_CR72","unstructured":"Virmaux, A., & Scaman, K. (2018). Lipschitz regularity of deep neural networks: Analysis and efficient estimation. NeurIPS 31"},{"key":"2205_CR73","unstructured":"Wang, X., Kodirov, E., Hua, Y., et\u00a0al. (2019). Improving MAE against CCE under label noise. arXiv preprint arXiv:1903.12141"},{"key":"2205_CR74","unstructured":"Wang, Y., Kucukelbir, A., Blei, D. M. (2017). Robust probabilistic modeling with Bayesian data reweighting. In: ICML"},{"key":"2205_CR75","doi-asserted-by":"crossref","unstructured":"Wang, Z., Hu, G., & Hu, Q. (2020). Training noise-robust deep neural networks via meta-learning. In: CVPR","DOI":"10.1109\/CVPR42600.2020.00458"},{"key":"2205_CR76","doi-asserted-by":"crossref","unstructured":"Wei, H., Feng, L., Chen, X., et\u00a0al. (2020). Combating noisy labels by agreement: A joint training method with co-regularization. In: CVPR","DOI":"10.1109\/CVPR42600.2020.01374"},{"key":"2205_CR77","doi-asserted-by":"crossref","unstructured":"Wei, Q., Sun, H., Lu, X., et\u00a0al. (2022). Self-filtering: A noise-aware sample selection for label noise with confidence penalization. In: ECCV","DOI":"10.1007\/978-3-031-20056-4_30"},{"key":"2205_CR78","doi-asserted-by":"crossref","unstructured":"Wei, Q., Feng, L., Sun, H., et\u00a0al. (2023). Fine-grained classification with noisy labels. In: CVPR","DOI":"10.1109\/CVPR52729.2023.01121"},{"key":"2205_CR79","doi-asserted-by":"crossref","unstructured":"Wu, Y., Shu, J., Xie, Q., et\u00a0al. (2021). Learning to purify noisy labels via meta soft label corrector. In: AAAI","DOI":"10.1609\/aaai.v35i12.17244"},{"key":"2205_CR80","unstructured":"Xia, X., Liu, T., Han, B., et\u00a0al. (2020a). Robust early-learning: Hindering the memorization of noisy labels. 
In: ICLR"},{"key":"2205_CR81","unstructured":"Xia, X., Liu, T., Han, B., et\u00a0al. (2020b). Part-dependent label noise: Towards instance-dependent label noise. In: NeurIPS"},{"key":"2205_CR82","doi-asserted-by":"crossref","unstructured":"Xia, X., Han, B., Zhan, Y., et\u00a0al. (2023). Combating noisy labels with sample selection by mining high-discrepancy examples. In: ICCV","DOI":"10.1109\/ICCV51070.2023.00176"},{"key":"2205_CR83","unstructured":"Xiao, T., Xia, T., Yang, Y., et\u00a0al. (2015). Learning from massive noisy labeled data for image classification. In: CVPR"},{"key":"2205_CR84","doi-asserted-by":"crossref","unstructured":"Xu, Y., Zhu, L., Jiang, L., et\u00a0al. (2021a). Faster meta update strategy for noise-robust deep learning. In: CVPR","DOI":"10.1109\/CVPR46437.2021.00021"},{"key":"2205_CR85","doi-asserted-by":"crossref","unstructured":"Xu, Y., Zhu, L., Jiang, L., et\u00a0al. (2021b). Faster meta update strategy for noise-robust deep learning. In: CVPR","DOI":"10.1109\/CVPR46437.2021.00021"},{"key":"2205_CR86","doi-asserted-by":"crossref","unstructured":"Xu, Y., Niu, X., Yang, J., et\u00a0al. (2023). Usdnl: Uncertainty-based single dropout in noisy label learning. In: AAAI, pp. 10648\u201310656","DOI":"10.1609\/aaai.v37i9.26264"},{"key":"2205_CR87","doi-asserted-by":"crossref","unstructured":"Yang, Y., Jiang, N., Xu, Y., et\u00a0al. (2024). Robust semi-supervised learning by wisely leveraging open-set data. IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 1\u201315","DOI":"10.1109\/TPAMI.2024.3403994"},{"key":"2205_CR88","unstructured":"Yao, Y., Liu, T., Han, B., et\u00a0al. (2020). Dual t: Reducing estimation error for transition matrix in label-noise learning. In: NeurIPS"},{"key":"2205_CR89","first-page":"4409","volume":"34","author":"Y Yao","year":"2021","unstructured":"Yao, Y., Liu, T., Gong, M., et al. (2021). Instance-dependent label-noise learning under a structural causal model. 
Advances in Neural Information Processing Systems, 34, 4409\u20134420.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2205_CR90","doi-asserted-by":"crossref","unstructured":"Yao, Y., Sun, Z., Zhang, C., et\u00a0al. (2021b). Jo-src: A contrastive approach for combating noisy labels. In: CVPR, pp. 5192\u20135201","DOI":"10.1109\/CVPR46437.2021.00515"},{"key":"2205_CR91","unstructured":"Yao, Y., Gong, M., Du, Y., et\u00a0al. (2023). Which is better for learning with noisy labels: The semi-supervised method or modeling label noise? In: ICML"},{"key":"2205_CR92","unstructured":"Yu, X., Han, B., Yao, J., et\u00a0al. (2019). How does disagreement help generalization against label corruption? In: ICML"},{"key":"2205_CR93","doi-asserted-by":"crossref","unstructured":"Yu, X., Jiang, Y., Shi, T., et\u00a0al. (2023). How to prevent the continuous damage of noises to model training? In: CVPR","DOI":"10.1109\/CVPR52729.2023.01160"},{"key":"2205_CR94","doi-asserted-by":"crossref","unstructured":"Yuan, S., Feng, L., & Liu, T. (2023). Late stopping: Avoiding confidently learning from mislabeled examples. In: ICCV","DOI":"10.1109\/ICCV51070.2023.01473"},{"key":"2205_CR95","doi-asserted-by":"crossref","unstructured":"Zadrozny, B. (2004). Learning and evaluating classifiers under sample selection bias. In: ICML","DOI":"10.1145\/1015330.1015425"},{"key":"2205_CR96","doi-asserted-by":"crossref","unstructured":"Zagoruyko, S., & Komodakis, N. (2016). Wide residual networks. In: BMVC","DOI":"10.5244\/C.30.87"},{"key":"2205_CR97","unstructured":"Zhang, H., Cisse, M., Dauphin, Y. N., et\u00a0al. (2018). mixup: Beyond empirical risk minimization. In: ICLR"},{"key":"2205_CR98","doi-asserted-by":"crossref","unstructured":"Zhang, W., Wang, Y., & Qiao, Y. (2019). Metacleaner: Learning to hallucinate clean representations for noisy-labeled visual recognition. 
In: CVPR","DOI":"10.1109\/CVPR.2019.00755"},{"key":"2205_CR99","unstructured":"Zhang, Y., Niu, G., Sugiyama, M. (2021a). Learning noise transition matrix from only noisy labels via total variation regularization. In: ICML"},{"key":"2205_CR100","unstructured":"Zhang, Y., Zheng, S., Wu, P., et\u00a0al. (2021b). Learning with feature-dependent label noise: A progressive approach. In: ICLR"},{"key":"2205_CR101","doi-asserted-by":"crossref","unstructured":"Zhang, Z., & Pfister, T. (2021). Learning fast sample re-weighting without reward data. In: ICCV, pp. 725\u2013734","DOI":"10.1109\/ICCV48922.2021.00076"},{"key":"2205_CR102","unstructured":"Zhang, Z., & Sabuncu, M. R. (2018). Generalized cross entropy loss for training deep neural networks with noisy labels. In: NeurIPS"},{"issue":"3","key":"2205_CR103","doi-asserted-by":"publisher","first-page":"1194","DOI":"10.1109\/TNNLS.2021.3105104","volume":"34","author":"Q Zhao","year":"2023","unstructured":"Zhao, Q., Shu, J., Yuan, X., et al. (2023). A probabilistic formulation for meta-weight-net. IEEE Transactions on Neural Networks and Learning Systems, 34(3), 1194\u20131208.","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"2205_CR104","doi-asserted-by":"crossref","unstructured":"Zheng, G., Awadallah, A. H., & Dumais, S. (2021). Meta label correction for noisy label learning. In: AAAI","DOI":"10.1609\/aaai.v35i12.17319"},{"key":"2205_CR105","doi-asserted-by":"crossref","unstructured":"Zhou, X., Liu, X., Wang, C., et\u00a0al. (2021). Learning with noisy labels via sparse regularization. In: ICCV","DOI":"10.1109\/ICCV48922.2021.00014"},{"issue":"5","key":"2205_CR106","doi-asserted-by":"publisher","first-page":"1259","DOI":"10.1007\/s11263-022-01598-5","volume":"130","author":"J Zhu","year":"2022","unstructured":"Zhu, J., Zhao, D., Zhang, B., et al. (2022). Disentangled inference for GANs with latently invertible autoencoder. 
International Journal of Computer Vision, 130(5), 1259\u20131276.","journal-title":"International Journal of Computer Vision"},{"key":"2205_CR107","doi-asserted-by":"crossref","unstructured":"Zhu, Z., Liu, T., & Liu, Y. (2021). A second-order approach to learning with instance-dependent label noise. In: CVPR","DOI":"10.1109\/CVPR46437.2021.00998"}],"updated-by":[{"DOI":"10.1007\/s11263-024-02242-0","type":"correction","label":"Correction","source":"publisher","updated":{"date-parts":[[2024,9,25]],"date-time":"2024-09-25T00:00:00Z","timestamp":1727222400000}}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-024-02205-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-024-02205-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-024-02205-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,22]],"date-time":"2025-01-22T06:41:23Z","timestamp":1737528083000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-024-02205-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,12]]},"references-count":107,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2025,2]]}},"alternative-id":["2205"],"URL":"https:\/\/doi.org\/10.1007\/s11263-024-02205-5","relation":{"correction":[{"id-type":"doi","id":"10.1007\/s11263-024-02242-0","asserted-by":"object"}]},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,8,12]]},"assertion":[{"value":"15 October 
2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 July 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 August 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 September 2024","order":4,"name":"change_date","label":"Change Date","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"Correction","order":5,"name":"change_type","label":"Change Type","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"A Correction to this paper has been published:","order":6,"name":"change_details","label":"Change Details","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"https:\/\/doi.org\/10.1007\/s11263-024-02242-0","URL":"https:\/\/doi.org\/10.1007\/s11263-024-02242-0","order":7,"name":"change_details","label":"Change Details","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The author declares that he has no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"The code is now available at .","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Code availability"}}]}}