{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T21:11:02Z","timestamp":1771708262024,"version":"3.50.1"},"reference-count":57,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2019,2,1]],"date-time":"2019-02-01T00:00:00Z","timestamp":1548979200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,2,1]],"date-time":"2019-02-01T00:00:00Z","timestamp":1548979200000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,2,1]],"date-time":"2019-02-01T00:00:00Z","timestamp":1548979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,2,1]],"date-time":"2019-02-01T00:00:00Z","timestamp":1548979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Simons Institute for the Theory of Computing"},{"DOI":"10.13039\/100000181","name":"Air Force Office of Scientific Research","doi-asserted-by":"publisher","award":["FA9550-18-1-0078"],"award-info":[{"award-number":["FA9550-18-1-0078"]}],"id":[{"id":"10.13039\/100000181","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["#1813877"],"award-info":[{"award-number":["#1813877"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100006785","name":"Google","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006785","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100005014","name":"Northrop Grumman","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100005014","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Google Faculty Research Award"},{"DOI":"10.13039\/100006034","name":"University of Southern California","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006034","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Inform. 
Theory"],"published-print":{"date-parts":[[2019,2]]},"DOI":"10.1109\/tit.2018.2854560","type":"journal-article","created":{"date-parts":[[2018,7,10]],"date-time":"2018-07-10T19:04:49Z","timestamp":1531249489000},"page":"742-769","source":"Crossref","is-referenced-by-count":142,"title":["Theoretical Insights Into the Optimization Landscape of Over-Parameterized Shallow Neural Networks"],"prefix":"10.1109","volume":"65","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2101-6418","authenticated-orcid":false,"given":"Mahdi","family":"Soltanolkotabi","sequence":"first","affiliation":[]},{"given":"Adel","family":"Javanmard","sequence":"additional","affiliation":[]},{"given":"Jason D.","family":"Lee","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.1991.155333"},{"key":"ref38","first-page":"643","article-title":"Gradient methods for minimizing functionals","volume":"3","author":"polyak","year":"1963","journal-title":"Zhurnal Wychislitelnoy Matematiki i Matematicheskoy Fiziki"},{"key":"ref33","first-page":"855","article-title":"On the computational efficiency of training neural networks","author":"livni","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref32","author":"li","year":"2017","journal-title":"Convergence analysis of two-layer neural networks with relu activation"},{"key":"ref31","author":"li","year":"2017","journal-title":"Algorithmic regularization in over-parameterized matrix sensing and neural networks with quadratic activations"},{"key":"ref30","author":"levy","year":"2016","journal-title":"The power of normalization Faster evasion of saddle points"},{"key":"ref37","author":"nguyen","year":"2017","journal-title":"The loss surface of deep and wide neural networks"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-006-0706-8"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2011.2109382"},{"key":"ref34","author":"mei","year":"2016","journal-title":"The landscape of empirical risk for non-convex losses"},{"key":"ref28","doi-asserted-by":"crossref","DOI":"10.1090\/surv\/089","volume":"89","author":"ledoux","year":"2005","journal-title":"The Concentration of Measure Phenomenon"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2671158"},{"key":"ref29","first-page":"1246","article-title":"Gradient descent only converges to minimizers","author":"lee","year":"2016","journal-title":"Proc Conf Learn Theory"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/s00365-010-9117-4"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1214\/ECP.v20-3829","article-title":"A note on the Hanson&#x2013;Wright inequality for random vectors with dependencies","volume":"20","author":"adamczak","year":"2015","journal-title":"Electron Commun Probab"},{"key":"ref20","first-page":"1594","article-title":"Beyond convexity: Stochastic quasi-convex optimization","author":"hazan","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref22","author":"jin","year":"2017","journal-title":"How to escape saddle points efficiently"},{"key":"ref21","author":"janzamin","year":"2015","journal-title":"Beating the perils of non-convexity Guaranteed training of neural networks using tensor methods"},{"key":"ref24","article-title":"The isotron algorithm: High-dimensional isotonic regression","author":"kalai","year":"2009","journal-title":"Proc Conf Learning Theory 
(COLT)"},{"key":"ref23","first-page":"927","article-title":"Efficient learning of generalized linear and single index models with isotonic regression","author":"kakade","year":"2011","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref26","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref25","first-page":"586","article-title":"Deep learning without poor local minima","author":"kawaguchi","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref50","author":"vershynin","year":"2010","journal-title":"Introduction to the Non-Asymptotic Analysis of Random Matrices"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1088\/0305-4470\/30\/17\/002"},{"key":"ref57","author":"zhong","year":"2017","journal-title":"Recovery guarantees for one-hidden-layer neural networks"},{"key":"ref56","first-page":"993","article-title":"$\\ell_{1}$\n-regularized neural networks are improperly learnable in polynomial time","author":"zhang","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref55","author":"zhang","year":"2017","journal-title":"Convergence results for neural networks via electrodynamics"},{"key":"ref54","author":"zhang","year":"2016","journal-title":"Understanding deep learning requires rethinking generalization"},{"key":"ref53","author":"xie","year":"2016","journal-title":"Diverse neural network learns true target functions"},{"key":"ref52","author":"wiatowski","year":"2017","journal-title":"Energy propagation in deep convolutional neural networks"},{"key":"ref10","author":"brutzkus","year":"2017","journal-title":"Globally optimal gradient descent for a ConvNet with Gaussian inputs"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2015.2399924"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevE.52.4225"},{"key":"ref12","author":"carmon","year":"2016","journal-title":"Gradient descent efficiently finds the cubic-regularized non-convex newton step"},{"key":"ref13","author":"carmon","year":"2016","journal-title":"Accelerated methods for non-convex optimization"},{"key":"ref14","first-page":"192","article-title":"The loss surfaces of multilayer networks","author":"choromanska","year":"2015","journal-title":"Proc AISTATS"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390177"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-016-1026-2"},{"key":"ref17","first-page":"797","article-title":"Escaping from saddle points&#x2014;Online stochastic gradient for tensor decomposition","author":"ge","year":"2015","journal-title":"Proc 28th Conf Learn Theory"},{"key":"ref18","author":"goel","year":"2016","journal-title":"Reliably learning the ReLU in polynomial time"},{"key":"ref19","author":"haeffele","year":"2015","journal-title":"Global optimality in tensor factorization deep learning and beyond"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511801334"},{"key":"ref3","author":"agarwal","year":"2016","journal-title":"Finding approximate local minima faster than gradient descent"},{"key":"ref6","author":"bartlett","year":"2017","journal-title":"Spectrally-normalized margin bounds for neural networks"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/BF00993164"},{"key":"ref8","first-page":"494","article-title":"Training a 3-node neural network is 
NP-complete","author":"blum","year":"1989","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1088\/0305-4470\/29\/16\/005"},{"key":"ref49","first-page":"1","article-title":"An analytical formula of population gradient for two-layered ReLU network and its applications in convergence and critical point analysis","author":"tian","year":"2017","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref9","author":"b\u00f6lcskei","year":"2017","journal-title":"Optimal approximation with sparsely connected deep neural networks"},{"key":"ref46","author":"soudry","year":"2016","journal-title":"No bad local minima Data independent training error guarantees for multilayer neural networks"},{"key":"ref45","author":"soltanolkotabi","year":"2017","journal-title":"Structured signal recovery from quadratic measurements Breaking sample complexity barriers via nonconvex optimization"},{"key":"ref48","author":"telgarsky","year":"2016","journal-title":"Benefits of depth in neural networks"},{"key":"ref47","author":"soudry","year":"2017","journal-title":"Exponentially vanishing sub-optimal local minima in multilayer neural networks"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1137\/100806126"},{"key":"ref41","author":"safran","year":"2017","journal-title":"Spurious local minima are common in two-layer ReLU neural networks"},{"key":"ref44","author":"soltanolkotabi","year":"2017","journal-title":"Learning ReLUs via gradient descent"},{"key":"ref43","article-title":"Algorithms and theory for clustering and nonconvex quadratic programming","author":"soltanolkotabi","year":"2014"}],"container-title":["IEEE Transactions on Information Theory"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielaam\/18\/8620171\/8409482-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/18\/8620171\/08409482.pdf?arnumber=8409482","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,13]],"date-time":"2022-07-13T20:43:09Z","timestamp":1657744989000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8409482\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,2]]},"references-count":57,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tit.2018.2854560","relation":{},"ISSN":["0018-9448","1557-9654"],"issn-type":[{"value":"0018-9448","type":"print"},{"value":"1557-9654","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,2]]}}}