{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,20]],"date-time":"2026-02-20T05:55:37Z","timestamp":1771566937503,"version":"3.50.1"},"reference-count":91,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"HPC resources from GENCI-IDRIS","award":["2020-AD011011970"],"award-info":[{"award-number":["2020-AD011011970"]}]},{"name":"HPC resources from GENCI-IDRIS","award":["2021-AD011011970R1"],"award-info":[{"award-number":["2021-AD011011970R1"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1109\/tpami.2023.3328829","type":"journal-article","created":{"date-parts":[[2023,10,31]],"date-time":"2023-10-31T17:56:21Z","timestamp":1698774981000},"page":"2027-2040","source":"Crossref","is-referenced-by-count":17,"title":["Encoding the Latent Posterior of Bayesian Neural Networks for Uncertainty Quantification"],"prefix":"10.1109","volume":"46","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2184-1381","authenticated-orcid":false,"given":"Gianni","family":"Franchi","sequence":"first","affiliation":[{"name":"Institut Polytechnique de Paris, Paris, France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2485-9402","authenticated-orcid":false,"given":"Andrei","family":"Bursuc","sequence":"additional","affiliation":[{"name":"valeo.ai, Paris, France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7065-4809","authenticated-orcid":false,"given":"Emanuel","family":"Aldea","sequence":"additional","affiliation":[{"name":"Paris Saclay University, Gif-sur-Yvette, France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7306-4134","authenticated-orcid":false,"given":"S\u00e9verine","family":"Dubuisson","sequence":"additional","affiliation":[{"name":"Aix Marseille University, Marseille, France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6984-1532","authenticated-orcid":false,"given":"Isabelle","family":"Bloch","sequence":"additional","affiliation":[{"name":"CNRS, LIP6, Sorbonne Universit\u00e9, Paris, France"}]}],"member":"263","reference":[{"key":"ref1","first-page":"6405","article-title":"Simple and scalable predictive uncertainty estimation using deep ensembles","volume-title":"Proc. 31st Int. Conf. Neural Inf. Process. Syst.","author":"Lakshminarayanan"},{"key":"ref2","article-title":"Pitfalls of in-domain uncertainty estimation and ensembling in deep learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ashukha"},{"key":"ref3","article-title":"A simple baseline for Bayesian uncertainty in deep learning","volume-title":"Proc. 33rd Int. Conf. Neural Inf. Process. Syst.","author":"Maddox"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58520-4_7"},{"key":"ref5","article-title":"Deep ensembles: A loss landscape perspective","author":"Fort","year":"2019"},{"key":"ref6","article-title":"Uncertainty in deep learning","author":"Gal","year":"2016"},{"key":"ref7","first-page":"7047","article-title":"Predictive uncertainty estimation via prior networks","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Malinin"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3461702.3462571"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-020-00367-3"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/661"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1992.4.3.448"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4612-0745-0"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-94-011-5014-9_5"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/168304.168306"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.5555\/2986459.2986721"},{"key":"ref16","first-page":"1613","article-title":"Weight uncertainty in neural networks","volume-title":"Proc. 32nd Int. Conf. Mach. Learn.","author":"Blundell"},{"key":"ref17","article-title":"Probabilistic backpropagation for scalable learning of Bayesian neural networks","volume-title":"Proc. 32nd Int. Conf. Mach. Learn.","author":"Hern\u00e1ndez-Lobato"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.5555\/3045390.3045502"},{"key":"ref19","first-page":"6248","article-title":"SLANG: Fast structured covariance approximations for Bayesian deep learning with natural gradient","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Mishkin"},{"key":"ref20","article-title":"On the expressiveness of approximate inference in Bayesian neural networks","volume-title":"Proc. 34th Int. Conf. Neural Inf. Process. Syst.","author":"Foong"},{"key":"ref21","volume-title":"Deep Learning","author":"Bengio","year":"2017"},{"key":"ref22","first-page":"901","article-title":"Weight normalization: A simple reparameterization to accelerate training of deep neural networks","volume-title":"Proc. 30th Int. Conf. Neural Inf. Process. Syst.","author":"Salimans"},{"key":"ref23","first-page":"1929","article-title":"Dropout: A simple way to prevent neural networks from overfitting","volume":"15","author":"Srivastava","year":"2014","journal-title":"J. Mach. Learn. Res."},{"key":"ref24","article-title":"Can you trust your models uncertainty? Evaluating predictive uncertainty under dataset shift","volume-title":"Proc. 33rd Int. Conf. Neural Inf. Process. Syst.","author":"Ovadia"},{"key":"ref25","article-title":"Efficient and scalable Bayesian neural nets with rank-1 factors","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Dusenberry"},{"key":"ref26","first-page":"1708","article-title":"Structured and efficient variational deep learning with matrix Gaussian posteriors","volume-title":"Proc. 33rd Int. Conf. Mach. Learn.","author":"Louizos"},{"key":"ref27","first-page":"1283","article-title":"Learning structured weight uncertainty in Bayesian neural networks","volume-title":"Proc. 20th Int. Conf. Artif. Intell. Statist.","author":"Sun"},{"key":"ref28","first-page":"5852","article-title":"Noisy natural gradient as variational inference","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Zhang"},{"key":"ref29","article-title":"Auto-encoding variational Bayes","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kingma"},{"key":"ref30","article-title":"BatchEnsemble: An alternative approach to efficient ensemble and lifelong learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wen"},{"key":"ref31","first-page":"1278","article-title":"Stochastic backpropagation and approximate inference in deep generative models","volume-title":"Proc. 31st Int. Conf. Mach. Learn.","author":"Rezende"},{"key":"ref32","article-title":"Weight standardization","author":"Qiao","year":"2019","journal-title":""},{"key":"ref33","article-title":"PEP: Parameter ensembling by perturbation","volume-title":"Proc. 34th Int. Conf. Neural Inf. Process. Syst.","author":"Mehrtash"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref35","article-title":"Measuring the intrinsic dimension of objective landscapes","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Li"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/S0951-8320(96)00077-4"},{"key":"ref37","article-title":"Bayesian deep learning and a probabilistic perspective of generalization","volume-title":"Proc. 34th Int. Conf. Neural Inf. Process. Syst.","author":"Wilson"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.5244\/C.30.87"},{"key":"ref39","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref40","article-title":"Benchmarking neural network robustness to common corruptions and perturbations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hendrycks"},{"key":"ref41","article-title":"Reading digits in natural images with unsupervised feature learning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Netzer"},{"key":"ref42","article-title":"Hierarchical Gaussian process priors for Bayesian neural network weights","volume-title":"Proc. 34th Int. Conf. Neural Inf. Process. Syst.","author":"Karaletsos"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27497-8"},{"key":"ref44","first-page":"876","article-title":"Averaging weights leads to wider optima and better generalization","volume-title":"Proc. Conf. Uncertainty Artif. Intell.","author":"Izmailov"},{"key":"ref45","article-title":"DICE: Diversity in deep ensembles via conditional redundancy adversarial estimation","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Rame"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1007\/3-540-44938-8_9"},{"key":"ref47","article-title":"Bayesian methods for adaptive models","author":"MacKay","year":"1992"},{"key":"ref48","article-title":"Structured weight priors for convolutional neural networks","volume-title":"Proc. Int. Conf. Mach. Learn. Workshops","author":"Pearce"},{"key":"ref49","article-title":"Deep Bayesian bandits showdown: An empirical comparison of Bayesian deep networks for thompson sampling","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Riquelme"},{"key":"ref50","article-title":"Probabilistic meta-representations of neural networks","volume-title":"Proc. Conf. Uncertainty Artif. Intell.","author":"Karaletsos"},{"key":"ref51","article-title":"Efficient approximate inference with Walsh-Hadamard variational inference","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Rossi"},{"key":"ref52","first-page":"1","article-title":"All you need is a good functional prior for Bayesian deep learning","volume":"23","author":"Tran","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref53","article-title":"How good is the bayes posterior in deep neural networks really?","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Wenzel"},{"key":"ref54","article-title":"Bayesian neural network priors revisited","volume-title":"Proc. Symp. Adv. Approx. Bayesian Inference","author":"Fortuin"},{"key":"ref55","article-title":"Why M heads are better than one: Training a diverse ensemble of deep networks","author":"Lee","year":"2015","journal-title":""},{"key":"ref56","article-title":"Snapshot ensembles: Train 1, get M for free","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Huang"},{"key":"ref57","first-page":"8803","article-title":"Loss surfaces, mode connectivity, and fast ensembling of DNNs","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Garipov"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-22796-8_28"},{"key":"ref59","article-title":"Training independent subnetworks for robust prediction","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Havasi"},{"key":"ref60","article-title":"Reverse KL-divergence training of prior networks: Improved uncertainty and adversarial robustness","volume-title":"Proc. 33rd Int. Conf. Neural Inf. Process. Syst.","author":"Malinin"},{"key":"ref61","first-page":"3183","article-title":"Evidential deep learning to quantify classification uncertainty","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Sensoy"},{"key":"ref62","article-title":"Posterior network: Uncertainty estimation without OOD samples via density-based pseudo-counts","volume-title":"Proc. 34th Int. Conf. Neural Inf. Process. Syst.","author":"Charpentier"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2020.12.011"},{"key":"ref64","article-title":"Being Bayesian about categorical probability","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Joo"},{"key":"ref65","article-title":"Regression prior networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Malinin"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1093\/biomet\/57.1.97"},{"key":"ref67","first-page":"681","article-title":"Bayesian learning via stochastic gradient Langevin dynamics","volume-title":"Proc. 28th Int. Conf. Mach. Learn.","author":"Welling"},{"key":"ref68","first-page":"II-1683","article-title":"Stochastic gradient Hamiltonian Monte Carlo","volume-title":"Proc. 31st Int. Conf. Mach. Learn.","author":"Chen"},{"key":"ref69","article-title":"Cyclical stochastic gradient MCMC for Bayesian deep learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang"},{"key":"ref70","article-title":"A contour stochastic gradient langevin dynamics algorithm for simulations of multi-modal distributions","volume-title":"Proc. 34th Int. Conf. Neural Inf. Process. Syst.","author":"Deng"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevLett.68.9"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1080\/00949655.2021.1958812"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-68445-1_53"},{"key":"ref74","article-title":"A baseline for detecting misclassified and out-of-distribution examples in neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hendrycks"},{"key":"ref75","first-page":"9690","article-title":"Uncertainty estimation using a single deep deterministic neural network","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Van Amersfoort"},{"key":"ref76","first-page":"1169","article-title":"Subspace inference for Bayesian deep learning","volume-title":"Proc. 35th Conf. Uncertainty Artif. Intell.","author":"Izmailov"},{"key":"ref77","article-title":"Functional variational Bayesian neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Sun"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-014-0733-5"},{"key":"ref79","first-page":"1321","article-title":"On calibration of modern neural networks","volume-title":"Proc. 34th Int. Conf. Mach. Learn.","author":"Guo"},{"key":"ref80","article-title":"Improving calibration of batchensemble with data augmentation","volume-title":"Proc. Int. Conf. Mach. Learn. Workshops","author":"Wen"},{"key":"ref81","article-title":"PyTorch: An imperative style, high-performance deep learning library","volume-title":"Proc. 33rd Int. Conf. Neural Inf. Process. Syst.","author":"Paszke"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.5555\/3295222.3295309"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/ICNN.1994.374138"},{"key":"ref84","article-title":"Improved regularization of convolutional neural networks with cutout","author":"DeVries","year":"2017","journal-title":""},{"key":"ref85","article-title":"A benchmark for anomaly segmentation","author":"Hendrycks","year":"2019","journal-title":""},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1802.02611"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.660"},{"key":"ref88","article-title":"Rethinking normalization and elimination singularity in neural networks","author":"Qiao","year":"2019","journal-title":""},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.5555\/3045118.3045167"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01261-8_1"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00271"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/10461350\/10302334.pdf?arnumber=10302334","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,8]],"date-time":"2024-03-08T02:22:05Z","timestamp":1709864525000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10302334\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4]]},"references-count":91,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2023.3328829","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4]]}}}