{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,13]],"date-time":"2026-03-13T04:54:43Z","timestamp":1773377683336,"version":"3.50.1"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,6,26]],"date-time":"2022-06-26T00:00:00Z","timestamp":1656201600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,6,26]],"date-time":"2022-06-26T00:00:00Z","timestamp":1656201600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001659","name":"Deutsche Forschungsgemeinschaft","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001659","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,6,26]]},"DOI":"10.1109\/isit50566.2022.9834569","type":"proceedings-article","created":{"date-parts":[[2022,8,3]],"date-time":"2022-08-03T15:34:22Z","timestamp":1659540862000},"page":"426-431","source":"Crossref","is-referenced-by-count":3,"title":["Regularization-wise double descent: Why it occurs and how to eliminate it"],"prefix":"10.1109","author":[{"given":"Fatih Furkan","family":"Yilmaz","sequence":"first","affiliation":[{"name":"Rice University,Dept. of Electrical and Computer Engineering"}]},{"given":"Reinhard","family":"Heckel","sequence":"additional","affiliation":[{"name":"Rice University,Dept. 
of Electrical and Computer Engineering"}]}],"member":"263","reference":[{"key":"ref10","article-title":"Understanding overfitting peaks in generalization error: analytical risk curves for l2 and l1 penalized interpolation","author":"mitra","year":"2019"},{"key":"ref11","article-title":"The generalization error of random features regression: precise asymptotics and double descent curve","author":"mei","year":"2019","journal-title":"Communications on Pure and Applied Mathematics"},{"key":"ref12","article-title":"Double trouble in double descent: bias and variance(s) in the lazy regime","author":"d\u2019ascoli","year":"2020","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref13","article-title":"Optimization variance: Exploring generalization properties of DNNs","author":"zhang","year":"2021"},{"key":"ref14","author":"nakkiran","year":"2019"},{"key":"ref15","article-title":"On the multiple descent of minimum-norm interpolants and restricted lower isometry of kernels","author":"liang","year":"2020","journal-title":"Proceedings of Thirty Third Conference on Learning Theory"},{"key":"ref16","article-title":"Implicit regularization of random feature models","author":"jacot","year":"2020","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref17","article-title":"Rethinking bias-variance trade-off for generalization of neural networks","author":"yang","year":"2020","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1214\/19-AOS1849"},{"key":"ref19","article-title":"A modern take on the bias-variance tradeoff in neural networks","author":"neal","year":"2019"},{"key":"ref4","article-title":"Deep double descent: Where bigger models and more data hurt","author":"nakkiran","year":"2020","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref27","article-title":"A continuous-time view of early stopping for least 
squares regression","author":"ali","year":"2019","journal-title":"International Conference on Artificial Intelligence and Statistics (AISTATS)"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1903070116"},{"key":"ref6","article-title":"Image recognition from raw labels collected without annotators","author":"yilmaz","year":"2020"},{"key":"ref5","article-title":"A closer look at memorization in deep networks","author":"arpit","year":"2017","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref8","article-title":"Surprises in high-dimensional ridgeless least squares interpolation","author":"hastie","year":"2019"},{"key":"ref7","article-title":"Early stopping in deep networks: Double descent and how to eliminate it","author":"heckel","year":"2021","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref2","article-title":"Statistical mechanics of learning: Generalization","author":"opper","year":"1995","journal-title":"The Handbook of Brain Theory and Neural Networks"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1137\/20M1336072"},{"key":"ref1","article-title":"Understanding deep learning requires rethinking generalization","author":"zhang","year":"2017","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref20","article-title":"Understanding double descent requires a fine-grained bias-variance decomposition","author":"adlam","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref22","article-title":"Optimal regularization can mitigate double descent","author":"nakkiran","year":"2021","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref21","article-title":"On the optimal weighted \u21132 regularization in overparameterized linear regression","author":"wu","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref24","article-title":"The 
neural tangent kernel in high dimensions: Triple descent and a multi-scale theory of generalization","author":"adlam","year":"2020","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref23","article-title":"Neural tangent kernel: Convergence and generalization in neural networks","author":"jacot","year":"2018","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref26","article-title":"On the training dynamics of deep networks with L2 regularization","author":"lewkowycz","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref25","article-title":"Regularization matters: Generalization and optimization of neural nets vs their induced kernel","author":"wei","year":"2019","journal-title":"Advances in Neural Information Processing Systems"}],"event":{"name":"2022 IEEE International Symposium on Information Theory (ISIT)","location":"Espoo, Finland","start":{"date-parts":[[2022,6,26]]},"end":{"date-parts":[[2022,7,1]]}},"container-title":["2022 IEEE International Symposium on Information Theory (ISIT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9834325\/9834269\/09834569.pdf?arnumber=9834569","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T20:34:51Z","timestamp":1773347691000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9834569\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,26]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/isit50566.2022.9834569","relation":{},"subject":[],"published":{"date-parts":[[2022,6,26]]}}}