{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,1,7]],"date-time":"2024-01-07T00:15:36Z","timestamp":1704586536455},"reference-count":17,"publisher":"Institute of Electronics, Information and Communications Engineers (IEICE)","issue":"1","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEICE Trans. Inf. &amp; Syst."],"published-print":{"date-parts":[[2024,1,1]]},"DOI":"10.1587\/transinf.2023edl8056","type":"journal-article","created":{"date-parts":[[2023,12,31]],"date-time":"2023-12-31T22:39:13Z","timestamp":1704062353000},"page":"144-147","source":"Crossref","is-referenced-by-count":0,"title":["Negative Learning to Prevent Undesirable Misclassification"],"prefix":"10.1587","volume":"E107.D","author":[{"given":"Kazuki","family":"EGASHIRA","sequence":"first","affiliation":[{"name":"The University of Tokyo"}]},{"given":"Atsuyuki","family":"MIYAI","sequence":"additional","affiliation":[{"name":"The University of Tokyo"}]},{"given":"Qing","family":"YU","sequence":"additional","affiliation":[{"name":"The University of Tokyo"}]},{"given":"Go","family":"IRIE","sequence":"additional","affiliation":[{"name":"Tokyo University of Science"}]},{"given":"Kiyoharu","family":"AIZAWA","sequence":"additional","affiliation":[{"name":"The University of Tokyo"}]}],"member":"532","reference":[{"key":"1","unstructured":"[1] T. Simonite, \u201cWhen It Comes to Gorillas, Google Photos Remains Blind.\u201d https:\/\/www.wired.com\/story\/when-it-comes-to-gorillas-google-photos-remains-blind\/, (Retrieved: 2023-02-05)."},{"key":"2","unstructured":"[2] A. Radford, J.W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark, et al., \u201cLearning transferable visual models from natural language supervision,\u201d ICML, 2021."},{"key":"3","doi-asserted-by":"crossref","unstructured":"[3] P. Goyal, A.R. Soriano, C. Hazirbas, L. Sagun, and N. Usunier, \u201cFairness indicators for systematic assessments of visual feature extractors,\u201d ACM FAccT, pp.70-88, 2022. 10.1145\/3531146.3533074","DOI":"10.1145\/3531146.3533074"},{"key":"4","doi-asserted-by":"crossref","unstructured":"[4] Y. Kim, J. Yim, J. Yun, and J. Kim, \u201cNlnl: Negative learning for noisy labels,\u201d ICCV, pp.101-110, 2019. 10.1109\/iccv.2019.00019","DOI":"10.1109\/ICCV.2019.00019"},{"key":"5","doi-asserted-by":"crossref","unstructured":"[5] K. Yang, K. Qinami, L. Fei-Fei, J. Deng, and O. Russakovsky, \u201cTowards fairer datasets: Filtering and balancing the distribution of the people subtree in the imagenet hierarchy,\u201d ACM FaccT, pp.547-558, 2020. 10.1145\/3351095.3375709","DOI":"10.1145\/3351095.3375709"},{"key":"6","doi-asserted-by":"crossref","unstructured":"[6] J. Deng, W. Dong, R. Socher, L.J. Li, K. Li, and L. Fei-Fei, \u201cImagenet: A large-scale hierarchical image database,\u201d CVPR, pp.248-255, 2009. 10.1109\/cvpr.2009.5206848","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"7","unstructured":"[7] T. Ishida, G. Niu, W. Hu, and M. Sugiyama, \u201cLearning from complementary labels,\u201d NeurIPS, 2017."},{"key":"8","doi-asserted-by":"crossref","unstructured":"[8] H. Tokunaga, B.K. Iwana, Y. Teramoto, A. Yoshizawa, and R. Bise, \u201cNegative pseudo labeling using class proportion for semantic segmentation in pathology,\u201d ECCV, vol.12360, pp.430-446, 2020. 10.1007\/978-3-030-58555-6_26","DOI":"10.1007\/978-3-030-58555-6_26"},{"key":"9","unstructured":"[9] M.N. Rizve, K. Duarte, Y.S. Rawat, and M. Shah, \u201cIn defense of pseudo-labeling: An uncertainty-aware pseudo-label selection framework for semi-supervised learning,\u201d ICLR, 2021."},{"key":"10","doi-asserted-by":"crossref","unstructured":"[10] Y. Kim, J. Yun, H. Shon, and J. Kim, \u201cJoint negative and positive learning for noisy labels,\u201d CVPR, pp.9437-9446, 2021. 10.1109\/cvpr46437.2021.00932","DOI":"10.1109\/CVPR46437.2021.00932"},{"key":"11","unstructured":"[11] A. Krizhevsky, G. Hinton, et al., \u201cLearning multiple layers of features from tiny images,\u201d Technical Report., 2009."},{"key":"12","doi-asserted-by":"crossref","unstructured":"[12] M.-E. Nilsback and A. Zisserman, \u201cAutomated flower classification over a large number of classes,\u201d ICVGIP, pp.722-729, 2008. 10.1109\/icvgip.2008.47","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"13","doi-asserted-by":"crossref","unstructured":"[13] K. He, X. Zhang, S. Ren, and J. Sun, \u201cDeep residual learning for image recognition,\u201d CVPR, pp.770-778, 2016. 10.1109\/cvpr.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"14","doi-asserted-by":"crossref","unstructured":"[14] H. Robbins and S. Monro, \u201cA stochastic approximation method,\u201d The annals of mathematical statistics, vol.22, no.3, pp.400-407, 1951. 10.1214\/aoms\/1177729586","DOI":"10.1214\/aoms\/1177729586"},{"key":"15","doi-asserted-by":"crossref","unstructured":"[15] C. Szegedy, V. Vanhoucke, S. Ioffe, J. Shlens, and Z. Wojna, \u201cRethinking the inception architecture for computer vision,\u201d CVPR, pp.2818-2826, 2016. 10.1109\/cvpr.2016.308","DOI":"10.1109\/CVPR.2016.308"},{"key":"16","unstructured":"[16] G. Hinton, O. Vinyals, J. Dean, et al., \u201cDistilling the knowledge in a neural network,\u201d NIPS Workshop, 2015."},{"key":"17","doi-asserted-by":"publisher","unstructured":"[17] B.-B. Gao, C. Xing, C.-W. Xie, J. Wu, and X. Geng, \u201cDeep label distribution learning with label ambiguity,\u201d IEEE Trans. Image Process., vol.26, no.6, pp.2825-2838, 2017. 10.1109\/tip.2017.2689998","DOI":"10.1109\/TIP.2017.2689998"}],"container-title":["IEICE Transactions on Information and Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E107.D\/1\/E107.D_2023EDL8056\/_pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,6]],"date-time":"2024-01-06T04:14:22Z","timestamp":1704514462000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E107.D\/1\/E107.D_2023EDL8056\/_article"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,1,1]]},"references-count":17,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2024]]}},"URL":"https:\/\/doi.org\/10.1587\/transinf.2023edl8056","relation":{},"ISSN":["0916-8532","1745-1361"],"issn-type":[{"value":"0916-8532","type":"print"},{"value":"1745-1361","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,1,1]]},"article-number":"2023EDL8056"}}