{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T06:07:33Z","timestamp":1764050853527,"version":"3.45.0"},"reference-count":52,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,9]],"date-time":"2025-10-09T00:00:00Z","timestamp":1759968000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,9]],"date-time":"2025-10-09T00:00:00Z","timestamp":1759968000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,9]]},"DOI":"10.1109\/dsaa65442.2025.11248029","type":"proceedings-article","created":{"date-parts":[[2025,11,24]],"date-time":"2025-11-24T18:56:45Z","timestamp":1764010605000},"page":"1-10","source":"Crossref","is-referenced-by-count":0,"title":["Abstention is all you need"],"prefix":"10.1109","author":[{"given":"Erik","family":"Sch\u00f6nw\u00e4lder","sequence":"first","affiliation":[{"name":"Technische Universit&#x00E4;t Dresden,Database Research Group,Dresden,Germany"}]},{"given":"Christian","family":"Falkenberg","sequence":"additional","affiliation":[{"name":"Technische Universit&#x00E4;t Dresden,Database Research Group,Dresden,Germany"}]},{"given":"Claudio","family":"Hartmann","sequence":"additional","affiliation":[{"name":"Technische Universit&#x00E4;t Dresden,Database Research Group,Dresden,Germany"}]},{"given":"Wolfgang","family":"Lehner","sequence":"additional","affiliation":[{"name":"Technische Universit&#x00E4;t Dresden,Database Research Group,Dresden,Germany"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Bert: pre-training of deep bidirectional transformers for language understanding","volume-title":"Proceedings of NAACL-HLT","author":"Devlin","year":"2019"},{"key":"ref2","article-title":"Language models are few-shot learners","author":"Brown","year":"2020","journal-title":"Neural Information Processing Systems (NeurIPS)"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1162\/coli.a.16"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.557"},{"key":"ref5","article-title":"Deep neu-ral network benchmarks for selective classification","author":"Pugnana","year":"2024","journal-title":"ar Xiv preprint"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-023-10562-9"},{"key":"ref7","article-title":"Predictor-rejector multi-class abstention: Theoretical analysis and algorithms","volume-title":"Proceedings of ALT","author":"Mao","year":"2024"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46379-7_5"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-024-06534-x"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TEC.1957.5222035"},{"key":"ref11","article-title":"Selective classification for deep neural networks","author":"Geifman","year":"2017","journal-title":"Neural Information Processing Systems (NeurIPS)"},{"key":"ref12","article-title":"Generating with confidence: Uncertainty quantification for black-box large language models","volume-title":"TMLR","author":"Lin","year":"2024"},{"key":"ref13","article-title":"Optimal strategies for reject option classifiers","volume-title":"JMLR","author":"Franc","year":"2023"},{"key":"ref14","article-title":"Simple and scalable predictive uncertainty estimation using deep 
ensembles","author":"Lakshminarayanan","year":"2017","journal-title":"Neural Information Processing Systems (NeurIPS)"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414046"},{"key":"ref16","article-title":"Semantic uncertainty: Linguistic invariances for uncertainty estimation in natural language generation","volume-title":"ICLR","author":"Kuhn","year":"2023"},{"key":"ref17","article-title":"Selectivenet: A deep neural network with an integrated reject option","volume-title":"ICML","author":"Geifman","year":"2019"},{"key":"ref18","article-title":"Self-adaptive training: beyond empirical risk minimization","author":"Huang","year":"2020","journal-title":"Neural Information Processing Systems (NeurIPS)"},{"key":"ref19","article-title":"Deep gamblers: Learning to abstain with portfolio theory","author":"Ziyin","year":"2019","journal-title":"Neural Information Processing Systems (Ne u rIPS)"},{"key":"ref20","article-title":"Language models (mostly) know what they know","author":"Kadavath","year":"2022","journal-title":"arXiv preprint"},{"key":"ref21","article-title":"To believe or not to believe your LLM","author":"Abbasi-Yadkori","year":"2024","journal-title":"arXiv preprint"},{"key":"ref22","article-title":"Reject before you run: Small assessors anticipate big language models","author":"Zhou","year":"2022","journal-title":"IJCAI-ECAI"},{"key":"ref23","article-title":"Unsupervised anomaly detection with rejection","author":"Perini","year":"2023","journal-title":"Neural Information Processing Systems (Ne u rIPS)"},{"volume-title":"Code and data repository for: Abstention is all you need","year":"2025","author":"Schonwalder","key":"ref24"},{"key":"ref25","article-title":"Dropout as a bayesian approximation: Representing model uncertainty in deep learning","volume-title":"ICML","author":"Gal","year":"2016"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.84"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.503"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i8.26133"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00407"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.repl4nlp-1.23"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.276"},{"key":"ref32","article-title":"Uncertainty quantification in fine-tuned llms using lora ensembles","author":"Balabanov","year":"2024","journal-title":"arXiv preprint"},{"key":"ref33","article-title":"Combating label noise in deep learning using abstention","volume-title":"ICML","author":"Thulasidasan","year":"2019"},{"key":"ref34","article-title":"Towards better selective classification","volume-title":"ICLR","author":"Feng","year":"2023"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.345"},{"key":"ref36","article-title":"Can llms express their uncertainty? 
{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i11.21487"},{"key":"ref38","article-title":"100 instances is all you need: predicting the success of a new LLM on unseen data by testing on a few instances","author":"Pacchiardi","year":"2024","journal-title":"arXiv preprint"},{"key":"ref39","article-title":"A novel reject option applied to sleep stage scoring","volume-title":"SDM","author":"der Plas","year":"2023"},{"key":"ref40","article-title":"Two-stage learning to defer with multiple experts","author":"Mao","year":"2023","journal-title":"Neural Information Processing Systems (NeurIPS)"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/DSAA49011.2020.00058"},{"key":"ref42","article-title":"Phi-3 technical report: A highly capable language model locally on your phone","author":"Abdin","year":"2024","journal-title":"arXiv preprint"},{"key":"ref43","article-title":"The llama 3 herd of models","author":"Dubey","year":"2024","journal-title":"arXiv preprint"},{"key":"ref44","article-title":"GLUE: A multi-task benchmark and analysis platform for natural language understanding","volume-title":"ICLR","author":"Wang","year":"2019"},{"key":"ref45","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/D16-1264","article-title":"SQuAD: 100,000+ questions for machine comprehension of text","volume-title":"EMNLP","author":"Rajpurkar","year":"2016"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-1147"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.5555\/1953048.2078195"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1108\/eb026526"},{"key":"ref49","article-title":"Distilbert, a distilled version of BERT: smaller, faster, cheaper and lighter","author":"Sanh","year":"2019","journal-title":"arXiv preprint"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.158"},{"journal-title":"Spam does not bring us joy - ridding Gmail of 100 million more spam messages with TensorFlow","year":"2019","author":"Kumaran","key":"ref51"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.3115\/1218955.1219032"}],"event":{"name":"2025 IEEE 12th International Conference on Data Science and Advanced Analytics (DSAA)","start":{"date-parts":[[2025,10,9]]},"location":"Birmingham, United Kingdom","end":{"date-parts":[[2025,10,12]]}},"container-title":["2025 IEEE 12th International Conference on Data Science and Advanced Analytics (DSAA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11247920\/11247921\/11248029.pdf?arnumber=11248029","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T05:59:06Z","timestamp":1764050346000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11248029\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,9]]},"references-count":52,"URL":"https:\/\/doi.org\/10.1109\/dsaa65442.2025.11248029","relation":{},"subject":[],"published":{"date-parts":[[2025,10,9]]}}}
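
For reference, a minimal Python sketch of how a work record like the one above can be fetched and its main fields read. It assumes the public Crossref REST API endpoint /works/{DOI} and the third-party requests package; the field names follow the JSON shown above, not an official client library.

import requests

# DOI taken from the record above
DOI = "10.1109/dsaa65442.2025.11248029"

# Crossref REST API: GET /works/{DOI} returns the same {"status": ..., "message": ...} envelope as above
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

title = work["title"][0]                              # "Abstention is all you need"
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))
venue = work.get("container-title", [""])[0]
print(f"{title} - {authors} ({venue}, {work['references-count']} references)")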