{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,22]],"date-time":"2025-04-22T19:02:56Z","timestamp":1745348576325,"version":"3.40.3"},"publisher-location":"Cham","reference-count":33,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031442032"},{"type":"electronic","value":"9783031442049"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-44204-9_18","type":"book-chapter","created":{"date-parts":[[2023,9,21]],"date-time":"2023-09-21T04:02:11Z","timestamp":1695268931000},"page":"211-222","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Evidential Robust Deep Learning for\u00a0Noisy Text2text Question Classification"],"prefix":"10.1007","author":[{"given":"Haoran","family":"Wang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0743-0121","authenticated-orcid":false,"given":"Jiyao","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yuqiu","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Zehua","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Zuping","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,22]]},"reference":[{"key":"18_CR1","unstructured":"Amid, E., et al.: Robust 
bi-tempered logistic loss based on Bregman divergences. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"18_CR2","unstructured":"Gal, Y., Ghahramani, Z.: Bayesian convolutional neural networks with Bernoulli approximate variational inference. arXiv preprint arXiv:1506.02158 (2015)"},{"key":"18_CR3","doi-asserted-by":"crossref","unstructured":"Ghosh, A., Kumar, H., Sastry, P.S.: Robust loss functions under label noise for deep neural networks. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 31, no. 1 (2017)","DOI":"10.1609\/aaai.v31i1.10894"},{"key":"18_CR4","unstructured":"Gordon, J., Shortliffe, E.H.: The Dempster-Shafer theory of evidence. In: Rule-Based Expert Systems: The MYCIN Experiments of the Stanford Heuristic Programming Project, vol. 3, pp. 832\u2013838 (1984)"},{"key":"18_CR5","unstructured":"Hjelm, R.D., et al.: Learning deep representations by mutual information estimation and maximization. arXiv preprint arXiv:1808.06670 (2018)"},{"key":"18_CR6","unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: accelerating deep network training by reducing internal covariate shift. In: International Conference on Machine Learning, pp. 448\u2013456. PMLR (2015)"},{"key":"18_CR7","doi-asserted-by":"crossref","unstructured":"Jenni, S., Favaro, P.: Deep bilevel learning. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 618\u2013633 (2018)","DOI":"10.1007\/978-3-030-01249-6_38"},{"key":"18_CR8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-42337-1","volume-title":"Subjective Logic","author":"A J\u00f8sang","year":"2016","unstructured":"J\u00f8sang, A.: Subjective Logic, vol. 3. Springer, Cham (2016). 
https:\/\/doi.org\/10.1007\/978-3-319-42337-1"},{"key":"18_CR9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-42337-1","volume-title":"Subjective Logic: A Formalism for Reasoning Under Uncertainty","author":"A J\u00f8sang","year":"2018","unstructured":"J\u00f8sang, A.: Subjective Logic: A Formalism for Reasoning Under Uncertainty. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-319-42337-1"},{"issue":"4","key":"18_CR10","doi-asserted-by":"publisher","first-page":"150","DOI":"10.3390\/info10040150","volume":"10","author":"K Kowsari","year":"2019","unstructured":"Kowsari, K., et al.: Text classification algorithms: a survey. Information 10(4), 150 (2019)","journal-title":"Information"},{"key":"18_CR11","unstructured":"Krogh, A., Hertz, J.: A simple weight decay can improve generalization. In: Advances in Neural Information Processing Systems, vol. 4 (1991)"},{"key":"18_CR12","doi-asserted-by":"crossref","unstructured":"Liu, P., et al.: Multi-timescale long short-term memory neural network for modelling sentences and documents. In: Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pp. 2326\u20132335 (2015)","DOI":"10.18653\/v1\/D15-1280"},{"key":"18_CR13","unstructured":"Lyu, Y., Tsang, I.W.: Curriculum loss: Robust learning and generalization against label corruption. arXiv preprint arXiv:1905.10045 (2019)"},{"key":"18_CR14","unstructured":"Ma, X., et al.: Normalized loss functions for deep learning with noisy labels. In: International Conference on Machine Learning, pp. 6543\u20136553. PMLR (2020)"},{"issue":"1","key":"18_CR15","doi-asserted-by":"publisher","first-page":"1","DOI":"10.3758\/s13428-011-0124-6","volume":"44","author":"W Mason","year":"2012","unstructured":"Mason, W., Suri, S.: Conducting behavioral research on Amazon\u2019s mechanical turk. Behav. Res. Methods 44(1), 1\u201323 (2012)","journal-title":"Behav. Res. 
Methods"},{"key":"18_CR16","doi-asserted-by":"crossref","unstructured":"Minaee, S., et al.: Deep learning-based text classification: a comprehensive review. In: ACM Computing Surveys (CSUR), vol. 54, no. 3, pp. 1\u201340 (2021)","DOI":"10.1145\/3439726"},{"issue":"5","key":"18_CR17","doi-asserted-by":"publisher","first-page":"411","DOI":"10.1017\/S1930297500002205","volume":"5","author":"G Paolacci","year":"2010","unstructured":"Paolacci, G., Chandler, J., Ipeirotis, P.G.: Running experiments on amazon mechanical turk. Judgment Decis. Making 5(5), 411\u2013419 (2010)","journal-title":"Judgment Decis. Making"},{"key":"18_CR18","doi-asserted-by":"crossref","unstructured":"Peng, H., et al.: Incrementally learning the hierarchical softmax function for neural language models. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 31, no. 1 (2017)","DOI":"10.1609\/aaai.v31i1.10994"},{"key":"18_CR19","unstructured":"Pereyra, G., et al.: Regularizing neural networks by penalizing confident output distributions. arXiv preprint arXiv:1701.06548 (2017)"},{"key":"18_CR20","doi-asserted-by":"publisher","first-page":"143","DOI":"10.1017\/S0962492900002919","volume":"8","author":"A Pinkus","year":"1999","unstructured":"Pinkus, A.: Approximation theory of the MLP model in neural networks. Acta Numer 8, 143\u2013195 (1999)","journal-title":"Acta Numer"},{"key":"18_CR21","doi-asserted-by":"crossref","unstructured":"Rao, J., et al.: Bridging the gap between relevance matching and semantic matching for short text similarity modeling. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 
5370\u20135381 (2019)","DOI":"10.18653\/v1\/D19-1540"},{"key":"18_CR22","doi-asserted-by":"publisher","first-page":"127","DOI":"10.1023\/A:1010091220143","volume":"1","author":"R Rubinstein","year":"1999","unstructured":"Rubinstein, R.: The cross-entropy method for combinatorial and continuous optimization. Methodol. Comput. Appl. Probab. 1, 127\u2013190 (1999)","journal-title":"Methodol. Comput. Appl. Probab."},{"key":"18_CR23","unstructured":"Scott, C., Blanchard, G., Handy, G.: Classification with asymmetric label noise: consistency and Maximal Denoising. In: Conference on Learning Theory, pp. 489\u2013511. PMLR (2013)"},{"key":"18_CR24","unstructured":"Sensoy, M., Kaplan, L., Kandemir, M.: Evidential deep learning to quantify classification uncertainty. In: Advances in Neural Information Processing Systems, vol. 31 (2018)"},{"key":"18_CR25","doi-asserted-by":"crossref","unstructured":"Shen, Y., et al.: Learning semantic representations using convolutional neural networks for web search. In: Proceedings of the 23rd International Conference on World Wide Web, pp. 373\u2013374 (2014)","DOI":"10.1145\/2567948.2577348"},{"key":"18_CR26","doi-asserted-by":"crossref","unstructured":"Song, H., et al.: Learning from noisy labels with deep neural networks: a survey. IEEE Trans. Neural Netw. Learn. Syst. (2022)","DOI":"10.1109\/TNNLS.2022.3152527"},{"key":"18_CR27","first-page":"16857","volume":"33","author":"K Song","year":"2020","unstructured":"Song, K., et al.: MPNeT: masked and permuted pre-training for language understanding. Adv. Neural. Inf. Process. Syst. 33, 16857\u201316867 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"18_CR28","doi-asserted-by":"publisher","first-page":"110110","DOI":"10.1016\/j.knosys.2022.110110","volume":"260","author":"J Wang","year":"2023","unstructured":"Wang, J., et al.: Multi-aspect co-attentional collaborative filtering for extreme multi-label text classification. Knowl. Based Syst. 
260, 110110 (2023)","journal-title":"Knowl. Based Syst."},{"key":"18_CR29","doi-asserted-by":"publisher","first-page":"126299","DOI":"10.1016\/j.neucom.2023.126299","volume":"544","author":"J Wang","year":"2023","unstructured":"Wang, J., et al.: Preciser comparison: augmented multi-layer dynamic contrastive strategy for text2text question classification. Neurocomputing 544, 126299 (2023)","journal-title":"Neurocomputing"},{"key":"18_CR30","unstructured":"Xia, X., et al.: Are anchor points really indispensable in label-noise learning? In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"18_CR31","unstructured":"Xia, X., et al.: Robust early-learning: Hindering the memorization of noisy labels. In: International Conference on Learning Representations (2020)"},{"key":"18_CR32","unstructured":"Yu, X., et al.: How does disagreement help generalization against label corruption? In: International Conference on Machine Learning. PMLR, pp. 7164\u20137173 (2019)"},{"key":"18_CR33","unstructured":"Zhang, Z., Sabuncu, M.: Generalized cross entropy loss for training deep neural networks with noisy labels. In: Advances in Neural Information Processing Systems, vol. 
31 (2018)"}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-44204-9_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,21]],"date-time":"2023-09-21T06:26:28Z","timestamp":1695277588000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-44204-9_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031442032","9783031442049"],"references-count":33,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-44204-9_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"22 September 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Heraklion","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 September 2023","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"32","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/e-nns.org\/icann2023\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"easyacademia.org","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"947","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"426","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"22","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"45% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a 
whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.4","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"type of other papers accepted  : 9 Abstract","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}