{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T04:06:30Z","timestamp":1750824390631,"version":"3.41.0"},"reference-count":91,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"7","license":[{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2024YFA1014202"],"award-info":[{"award-number":["2024YFA1014202"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["12371441"],"award-info":[{"award-number":["12371441"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Inform. Theory"],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1109\/tit.2025.3570730","type":"journal-article","created":{"date-parts":[[2025,5,16]],"date-time":"2025-05-16T17:43:39Z","timestamp":1747417419000},"page":"5512-5538","source":"Crossref","is-referenced-by-count":0,"title":["Error Analysis of Three-Layer Neural Network Trained With PGD for Deep Ritz Method"],"prefix":"10.1109","volume":"71","author":[{"given":"Yuling","family":"Jiao","sequence":"first","affiliation":[{"name":"School of Artificial Intelligence, the National Center for Applied Mathematics in Hubei, Hubei Key Laboratory of Computational Science, and the School of Mathematics and Statistics, Wuhan University, Wuhan, Hubei, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-5510-8760","authenticated-orcid":false,"given":"Yanming","family":"Lai","sequence":"additional","affiliation":[{"name":"Department of Mathematics, The Hong Kong University of Science and Technology, Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8903-2388","authenticated-orcid":false,"given":"Yang","family":"Wang","sequence":"additional","affiliation":[{"name":"Department of Mathematics, The Hong Kong University of Science and Technology, Hong Kong, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1017\/S0962492900002919"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2017.07.002"},{"key":"ref3","first-page":"639","article-title":"Optimal approximation of continuous functions by very deep ReLU networks","volume-title":"Proc. Conf. Learn. Theory","author":"Yarotsky"},{"key":"ref4","first-page":"11932","article-title":"Elementary superexpressive activations","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yarotsky"},{"key":"ref5","first-page":"13005","article-title":"The phase diagram of approximation rates for deep neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Yarotsky"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2020.05.019"},{"key":"ref7","first-page":"1","article-title":"Adaptivity of deep ReLU network for learning in Besov and mixed smooth Besov spaces: Optimal rate and curse of dimensionality","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Suzuki"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/s10208-022-09595-3"},{"issue":"5","key":"ref9","doi-asserted-by":"crossref","first-page":"1768","DOI":"10.4208\/cicp.OA-2020-0149","article-title":"Deep network approximation characterized by number of neurons","volume":"28","author":"Shen","year":"2020","journal-title":"Commun. Comput. Phys."},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1137\/20M134695X"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1162\/neco_a_01364"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2021.04.011"},{"issue":"276","key":"ref13","first-page":"1","article-title":"Deep network approximation: Achieving arbitrary accuracy with fixed number of neurons","volume":"23","author":"Shen","year":"2021","journal-title":"J. Mach. Learn. Res."},{"issue":"35","key":"ref14","first-page":"1","article-title":"Deep network approximation: Beyond ReLU to diverse activation functions","volume":"25","author":"Shi-jun","year":"2023","journal-title":"J. Mach. Learn. Res."},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1137\/21M144431X"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1142\/S0219530519410021"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2020.11.010"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s00365-024-09679-z"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1017\/S0962492921000027"},{"key":"ref20","first-page":"8580","article-title":"Neural tangent kernel: Convergence and generalization in neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Jacot"},{"key":"ref21","first-page":"242","article-title":"A convergence theory for deep learning via over-parameterization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Allen-Zhu"},{"key":"ref22","first-page":"6155","article-title":"Learning and generalization in overparameterized neural networks, going beyond two layers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Allen-Zhu"},{"key":"ref23","first-page":"6673","article-title":"On the convergence rate of training recurrent neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Allen-Zhu"},{"key":"ref24","first-page":"1","article-title":"Gradient descent provably optimizes over-parameterized neural networks","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Du"},{"key":"ref25","first-page":"1675","article-title":"Gradient descent finds global minima of deep neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Du"},{"key":"ref26","first-page":"322","article-title":"Fine-grained analysis of optimization and generalization for overparameterized two-layer neural networks","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Arora"},{"key":"ref27","first-page":"2053","article-title":"An improved analysis of training over-parameterized deep neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Zou"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-019-05839-6"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5736"},{"key":"ref30","first-page":"10835","article-title":"Generalization bounds of stochastic gradient descent for wide and deep neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Cao"},{"key":"ref31","first-page":"1","article-title":"How much over-parameterization is sufficient to learn deep ReLU networks?","volume-title":"Proc. Int. Conf. Learn. Represent. (ICLR)","author":"Chen"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2018.2854560"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/JSAIT.2020.2991332"},{"key":"ref34","first-page":"11961","article-title":"Global convergence of deep networks with one wide layer followed by pyramidal topology","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Nguyen"},{"key":"ref35","first-page":"8056","article-title":"On the proof of global convergence of gradient descent for deep ReLU networks with linear widths","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Nguyen"},{"key":"ref36","first-page":"15954","article-title":"On the linearity of large non-linear models: When and why the tangent kernel is constant","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1016\/j.acha.2021.12.009"},{"key":"ref38","first-page":"110","article-title":"Neural tangent kernel at initialization: Linear width suffices","volume-title":"Proc. Uncertainty Artif. Intell.","author":"Banerjee"},{"key":"ref39","first-page":"1","article-title":"Effect of activation functions on the training of overparametrized neural nets","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Panigrahi"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1806579115"},{"key":"ref41","first-page":"3040","article-title":"On the global convergence of gradient descent for over-parameterized models using optimal transport","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chizat"},{"key":"ref42","first-page":"1887","article-title":"Modeling from features: A mean-field framework for over-parameterized deep neural networks","volume-title":"Proc. Conf. Learn. Theory","author":"Fang"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.4171\/msl\/42"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1137\/18M1192184"},{"key":"ref45","first-page":"463","article-title":"Rademacher and Gaussian complexities: Risk bounds and structural results","volume":"3","author":"Bartlett","year":"2002","journal-title":"J. Mach. Learn. Res."},{"issue":"63","key":"ref46","first-page":"1","article-title":"Nearly-tight VC-dimension and pseudodimension bounds for piecewise linear neural networks","volume":"20","author":"Bartlett","year":"2019","journal-title":"J. Mach. Learn. Res."},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1093\/imaiai\/iaz007"},{"key":"ref48","first-page":"1376","article-title":"Norm-based capacity control in neural networks","volume-title":"Proc. Conf. Learn. Theory","author":"Neyshabur"},{"key":"ref49","first-page":"21721","article-title":"Nearly optimal VC-dimension and pseudo-dimension bounds for deep neural network derivatives","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Yang"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1996.8.1.164"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1718942115"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1088\/1361-6544\/ac337f"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1016\/j.jcp.2018.08.029"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1137\/19m1274067"},{"key":"ref55","first-page":"3208","article-title":"PDE-Net: Learning PDEs from data","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Long"},{"issue":"25","key":"ref56","first-page":"1","article-title":"Deep hidden physics models: Deep learning of nonlinear partial differential equations","volume":"19","author":"Raissi","year":"2018","journal-title":"J. Mach. Learn. Res."},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/s40304-018-0127-z"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1016\/j.jcp.2018.10.045"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1016\/j.jcp.2020.109409"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1137\/22M1488405"},{"key":"ref61","first-page":"253","article-title":"Some observations on high-dimensional partial differential equations with barron data","volume-title":"Proc. Math. Sci. Mach. Learn.","author":"Weinan"},{"key":"ref62","first-page":"3196","article-title":"A priori generalization analysis of the deep Ritz method for solving high dimensional elliptic partial differential equations","volume-title":"Proc. Conf. Learn. Theory","author":"Lu"},{"key":"ref63","article-title":"A priori analysis of stable neural network solutions to numerical PDEs","author":"Hong","year":"2021","journal-title":"arXiv:2104.02903"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.4208\/cicp.OA-2020-0191"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1090\/cams\/5"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1142\/S021953052350015X"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.4208\/cicp.OA-2021-0195"},{"issue":"4","key":"ref68","first-page":"1020","article-title":"Deep Ritz methods for Laplace equations with Dirichlet boundary condition","volume":"31","author":"Duan","year":"2022","journal-title":"Commun. Comput. Phys."},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.4208\/cicp.OA-2021-0186"},{"key":"ref70","first-page":"1","article-title":"Machine learning for elliptic PDEs: Fast rate generalization bound, neural scaling law and minimax optimality","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Lu"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.4208\/cicp.OA-2020-0193"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1007\/s10915-023-02432-x"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.4208\/jcm.2101-m2020-0342"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1007\/s10444-022-09985-9"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1093\/imanum\/drab093"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1615\/JMachLearnModelComput.2023050411"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-37800-3_4"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1137\/22M1527763"},{"key":"ref79","first-page":"215","article-title":"Error estimates for the deep Ritz method with boundary penalty","volume-title":"Proc. Math. Sci. Mach. Learn.","author":"M\u00fcller"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2021.08.015"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1007\/s10463-024-00898-6"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1109\/tit.2025.3541181"},{"key":"ref83","article-title":"Analysis of the expected L2 error of an over-parametrized deep neural network estimate learned by gradient descent without regularization","author":"Drews","year":"2023","journal-title":"arXiv:2311.14609"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1137\/1.9780898719208"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4757-4338-8"},{"volume-title":"Sobolev Spaces","year":"2003","author":"Adams","key":"ref86"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1137\/1.9781611972030"},{"key":"ref88","volume-title":"Convex Optimization Theory","volume":"1","author":"Bertsekas","year":"2009"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1017\/9781108231596"},{"volume-title":"Selected Works of A. N. Kolmogorov: Information Theory and the Theory of Algorithms","year":"2010","author":"Shiryayev","key":"ref90"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1080\/03605309608821249"}],"container-title":["IEEE Transactions on Information Theory"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/18\/11045245\/11006476.pdf?arnumber=11006476","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,24]],"date-time":"2025-06-24T07:06:12Z","timestamp":1750748772000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11006476\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7]]},"references-count":91,"journal-issue":{"issue":"7"},"URL":"https:\/\/doi.org\/10.1109\/tit.2025.3570730","relation":{},"ISSN":["0018-9448","1557-9654"],"issn-type":[{"type":"print","value":"0018-9448"},{"type":"electronic","value":"1557-9654"}],"subject":[],"published":{"date-parts":[[2025,7]]}}}