{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T15:59:56Z","timestamp":1774627196851,"version":"3.50.1"},"reference-count":63,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1109\/tnnls.2023.3326654","type":"journal-article","created":{"date-parts":[[2023,11,3]],"date-time":"2023-11-03T18:08:22Z","timestamp":1699034902000},"page":"1533-1544","source":"Crossref","is-referenced-by-count":10,"title":["Understanding Deep Learning via Decision Boundary"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7810-9346","authenticated-orcid":false,"given":"Shiye","family":"Lei","sequence":"first","affiliation":[{"name":"Sydney AI Centre and School of Computer Science, Faculty of Engineering, The University of Sydney, Darlington, NSW, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5584-2385","authenticated-orcid":false,"given":"Fengxiang","family":"He","sequence":"additional","affiliation":[{"name":"Artificial Intelligence and its Applications Institute, School of Informatics, University of Edinburgh, Edinburgh, U.K"}]},{"given":"Yancheng","family":"Yuan","sequence":"additional","affiliation":[{"name":"Department of Applied Mathematics, The Hong Kong Polytechnic University, Hung Hom, Hong Kong SAR"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7225-5449","authenticated-orcid":false,"given":"Dacheng","family":"Tao","sequence":"additional","affiliation":[{"name":"Sydney AI Centre and School of Computer Science, Faculty of Engineering, The University of Sydney, Darlington, NSW, Australia"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","volume":"25","author":"Krizhevsky"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref4","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Brown"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/2783258.2783273"},{"key":"ref6","volume-title":"Foundations of Machine Learning","author":"Mohri","year":"2018"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1994.6.5.851"},{"key":"ref8","first-page":"463","article-title":"Rademacher and Gaussian complexities: Risk bounds and structural results","volume":"3","author":"Bartlett","year":"2002","journal-title":"J. Mach. Learn. Res."},{"key":"ref9","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref10","article-title":"Towards a mathematical understanding of neural network-based machine learning: What we know and what we don\u2019t","author":"Ma","year":"2020","journal-title":"arXiv:2009.10713"},{"key":"ref11","article-title":"Recent advances in deep learning theory","author":"He","year":"2020","journal-title":"arXiv:2012.10931"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3446776"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1903070116"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1088\/1742-5468\/ac3a74"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3028431"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2023.103951"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1080\/01621459.2022.2093206"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-7908-2604-3_16"},{"key":"ref19","first-page":"1225","article-title":"Train faster, generalize better: Stability of stochastic gradient descent","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Hardt"},{"key":"ref20","first-page":"1143","article-title":"Control batch size and learning rate to generalize well: Theoretical and empirical evidence","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"He"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3027750"},{"key":"ref22","first-page":"1724","article-title":"How to escape saddle points efficiently","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jin"},{"key":"ref23","article-title":"Deep learning without poor local minima","volume-title":"Proc. Advances Neural Inf. Process. Syst.","volume":"29","author":"Kawaguchi"},{"key":"ref24","article-title":"Depth creates no bad local minima","author":"Lu","year":"2017","journal-title":"arXiv:1702.08580"},{"key":"ref25","article-title":"Critical points of neural networks: Analytical forms and landscape properties","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhou"},{"key":"ref26","article-title":"Piecewise linear activations substantially shape the loss surfaces of neural networks","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"He"},{"key":"ref27","article-title":"Truth or backpropaganda? An empirical investigation of deep learning theory","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Goldblum"},{"issue":"1","key":"ref28","first-page":"2822","article-title":"The implicit bias of gradient descent on separable data","volume":"19","author":"Soudry","year":"2018","journal-title":"J. Mach. Learn. Res."},{"key":"ref29","first-page":"17176","article-title":"Directional convergence and alignment in deep learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Ji"},{"key":"ref30","article-title":"Gradient descent maximizes the margin of homogeneous neural networks","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Lyu"},{"key":"ref31","first-page":"1305","article-title":"Implicit bias of gradient descent for wide two-layer neural networks trained with the logistic loss","volume-title":"Proc. Conf. Learn. Theory","author":"Chizat"},{"key":"ref32","article-title":"The deep bootstrap framework: Good online learners are good offline generalizers","author":"Nakkiran","year":"2020","journal-title":"arXiv:2010.08127"},{"key":"ref33","article-title":"Assessing generalization of SGD via disagreement","author":"Jiang","year":"2021","journal-title":"arXiv:2106.13799"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3043196"},{"key":"ref35","volume-title":"Understanding deep learning via large-scale systematic experiments","author":"Lei","year":"2021"},{"key":"ref36","first-page":"5301","article-title":"On the spectral bias of neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rahaman"},{"key":"ref37","article-title":"Frequency principle: Fourier analysis sheds light on deep neural networks","author":"John Xu","year":"2019","journal-title":"arXiv:1901.06523"},{"key":"ref38","first-page":"3496","article-title":"SGD on neural networks learns functions of increasing complexity","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Kalimeris"},{"key":"ref39","article-title":"The local elasticity of neural networks","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"He"},{"key":"ref40","article-title":"Stiffness: A new perspective on generalization in neural networks","author":"Fort","year":"2019","journal-title":"arXiv:1901.09491"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.2015509117"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.2103091118"},{"key":"ref43","article-title":"Decision boundary analysis of adversarial examples","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"He"},{"key":"ref44","article-title":"Characterizing the decision boundary of deep neural networks","author":"Karimi","year":"2019","journal-title":"arXiv:1912.11460"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1145\/3336191.3372186"},{"key":"ref46","volume-title":"On the Decision Boundaries of Deep Neural Networks: A Tropical Geometry Perspective","author":"Alfarra","year":"2020"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICMLA51294.2020.00025"},{"key":"ref48","article-title":"Understanding the decision boundary of deep neural networks: An empirical study","author":"Mickisch","year":"2020","journal-title":"arXiv:2002.01810"},{"key":"ref49","article-title":"Hold me tight! Influence of discriminative features on deep network boundaries","author":"Ortiz-Jimenez","year":"2020","journal-title":"arXiv:2002.06349"},{"key":"ref50","article-title":"The pitfalls of simplicity bias in neural networks","author":"Shah","year":"2020","journal-title":"arXiv:2006.07710"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01249-6_41"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.06083"},{"key":"ref53","volume-title":"Information Theory, Inference and Learning Algorithms","author":"MacKay","year":"2003"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1992.4.3.448"},{"key":"ref55","article-title":"Laplace redux\u2013effortless Bayesian deep learning","volume-title":"Proc. NeurIPS","author":"Daxberger"},{"key":"ref56","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014","journal-title":"arXiv:1409.1556"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.5244\/c.30.87"},{"key":"ref58","article-title":"Differentiable augmentation for data-efficient GAN training","author":"Zhao","year":"2020","journal-title":"arXiv:2006.10738"},{"key":"ref59","article-title":"Selection via proxy: Efficient data selection for deep learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Coleman"},{"key":"ref60","article-title":"Bayesian model selection, the marginal likelihood, and generalization","author":"Lotfi","year":"2022","journal-title":"arXiv:2202.11678"},{"key":"ref61","article-title":"Fixup initialization: Residual learning without normalization","author":"Zhang","year":"2019","journal-title":"arXiv:1901.09321"},{"key":"ref62","first-page":"4563","article-title":"Scalable marginal likelihood estimation for model selection in deep learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Immer"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9781107298019"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10832116\/10306309.pdf?arnumber=10306309","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,8]],"date-time":"2025-01-08T20:21:22Z","timestamp":1736367682000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10306309\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1]]},"references-count":63,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2023.3326654","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1]]}}}