{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,6]],"date-time":"2025-11-06T01:03:58Z","timestamp":1762391038204,"version":"3.37.3"},"reference-count":53,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"8","license":[{"start":{"date-parts":[[2022,8,1]],"date-time":"2022-08-01T00:00:00Z","timestamp":1659312000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"WuDao Research Foundation"},{"DOI":"10.13039\/501100002920","name":"General Research Fund","doi-asserted-by":"publisher","award":["16201320"],"award-info":[{"award-number":["16201320"]}],"id":[{"id":"10.13039\/501100002920","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Inform. Theory"],"published-print":{"date-parts":[[2022,8]]},"DOI":"10.1109\/tit.2022.3163341","type":"journal-article","created":{"date-parts":[[2022,3,30]],"date-time":"2022-03-30T19:58:06Z","timestamp":1648670286000},"page":"5340-5352","source":"Crossref","is-referenced-by-count":5,"title":["Convex Formulation of Overparameterized Deep Neural Networks"],"prefix":"10.1109","volume":"68","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5076-7897","authenticated-orcid":false,"given":"Cong","family":"Fang","sequence":"first","affiliation":[{"name":"Shenzhen Research Institute of Big Data, Shenzhen, China"}]},{"given":"Yihong","family":"Gu","sequence":"additional","affiliation":[{"name":"Shenzhen Research Institute of Big Data, Shenzhen, China"}]},{"given":"Weizhong","family":"Zhang","sequence":"additional","affiliation":[{"name":"Department of Mathematics, The Hong Kong University of Science and Technology (HKUST), Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5511-2558","authenticated-orcid":false,"given":"Tong","family":"Zhang","sequence":"additional","affiliation":[{"name":"Department of Mathematics, The Hong Kong University of Science and Technology (HKUST), Hong Kong"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Krizhevsky"},{"volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Simonyan","article-title":"Very deep convolutional networks for large-scale image recognition","key":"ref2"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1109\/CVPR.2016.90"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1109\/CVPR.2014.244"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.1109\/CVPR.2015.7299101"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.18653\/v1\/D15-1166"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1073\/pnas.1806579115"},{"key":"ref8","first-page":"3036","article-title":"On the global convergence of gradient descent for over-parameterized models using optimal transport","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chizat"},{"key":"ref9","first-page":"1675","article-title":"Gradient descent finds global minima of deep neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Du"},{"key":"ref10","first-page":"242","article-title":"A convergence theory for deep learning via over-parameterization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Allen-Zhu"},{"doi-asserted-by":"publisher","key":"ref11","DOI":"10.1145\/3446776"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1007\/978-3-319-10590-1_53"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1109\/TIT.2017.2776228"},{"volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Hardt","article-title":"Identity matters in deep learning","key":"ref14"},{"key":"ref15","article-title":"Topology and geometry of half-rectified network optimization","author":"Freeman","year":"2016","journal-title":"arXiv:1611.01540"},{"key":"ref16","article-title":"Globally optimal gradient descent for a ConvNet with Gaussian inputs","author":"Brutzkus","year":"2017","journal-title":"arXiv:1702.07966"},{"key":"ref17","article-title":"Learning one-hidden-layer neural networks with landscape design","author":"Ge","year":"2017","journal-title":"arXiv:1711.00501"},{"key":"ref18","article-title":"Learning two layer rectified neural networks in polynomial time","author":"Bakshi","year":"2018","journal-title":"arXiv:1811.01885"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1109\/TIT.2018.2854560"},{"key":"ref20","first-page":"797","article-title":"Escaping from saddle points\u2014Online stochastic gradient for tensor decomposition","volume-title":"Proc. Annu. Conf. Learn. Theory","author":"Ge"},{"key":"ref21","first-page":"1724","article-title":"How to escape saddle points efficiently","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jin"},{"key":"ref22","article-title":"SPIDER: Near-optimal non-convex optimization via stochastic path-integrated differential estimator","volume-title":"Advances in Neural Information Processing Systems","volume":"31","author":"Fang","year":"2018"},{"key":"ref23","first-page":"1192","article-title":"Sharp analysis for nonconvex SGD escaping from saddle points","volume-title":"Proc. Annu. Conf. Learn. Theory","author":"Fang"},{"doi-asserted-by":"publisher","key":"ref24","DOI":"10.1016\/j.spa.2019.06.003"},{"key":"ref25","article-title":"Trainability and accuracy of neural networks: An interacting particle system approach","author":"Rotskoff","year":"2018","journal-title":"arXiv:1805.00915"},{"key":"ref26","first-page":"2388","article-title":"Mean-field theory of two-layers neural networks: Dimension-free bounds and kernel limit","volume-title":"Proc. Annu. Conf. Learn. Theory","author":"Mei"},{"key":"ref27","first-page":"9712","article-title":"Regularization matters: Generalization and optimization of neural nets vs their induced kernel","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Wei"},{"volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Pham","article-title":"Global convergence of three-layer neural networks in the mean field regime","key":"ref28"},{"volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Du","article-title":"Gradient descent provably optimizes over-parameterized neural networks","key":"ref29"},{"key":"ref30","article-title":"Learning overparameterized neural networks via stochastic gradient descent on structured data","volume-title":"Advances in Neural Information Processing Systems","volume":"31","author":"Li","year":"2018"},{"key":"ref31","first-page":"322","article-title":"Fine-grained analysis of optimization and generalization for overparameterized two-layer neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Arora"},{"key":"ref32","article-title":"On learning over-parameterized neural networks: A functional approximation prospective","volume-title":"Advances in Neural Information Processing Systems","volume":"32","author":"Su","year":"2019"},{"volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Lee","article-title":"Deep neural networks as Gaussian processes","key":"ref33"},{"key":"ref34","article-title":"Neural tangent kernel: Convergence and generalization in neural networks","volume":"31","author":"Jacot","year":"2018","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref35","article-title":"Learning and generalization in overparameterized neural networks, going beyond two layers","volume":"32","author":"Allen-Zhu","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"doi-asserted-by":"publisher","key":"ref36","DOI":"10.1007\/s10994-019-05839-6"},{"doi-asserted-by":"publisher","key":"ref37","DOI":"10.1109\/TIT.2021.3065212"},{"key":"ref38","first-page":"7695","article-title":"Neural networks are convex regularizers: Exact polynomial-time convex optimization formulations for two-layer networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Pilanci"},{"key":"ref39","article-title":"Implicit convex regularizers of CNN architectures: Convex optimization of two- and three-layer networks in polynomial time","author":"Ergen","year":"2020","journal-title":"arXiv:2006.14798"},{"issue":"212","key":"ref40","first-page":"1","article-title":"Convex geometry and duality of over-parameterized neural networks","volume":"22","author":"Ergen","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref41","article-title":"Training quantized neural networks to global optimality via semidefinite programming","author":"Bartan","year":"2021","journal-title":"arXiv:2105.01420"},{"key":"ref42","first-page":"3004","article-title":"Revealing the structure of deep neural networks via convex duality","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ergen"},{"key":"ref43","first-page":"3797","article-title":"How to characterize the landscape of overparameterized convolutional neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Gu"},{"key":"ref44","first-page":"1887","article-title":"Modeling from features: A mean-field framework for over-parameterized deep neural networks","volume-title":"Proc. Annu. Conf. Learn. Theory","author":"Fang"},{"key":"ref45","article-title":"A mean-field limit for certain deep neural networks","author":"Ara\u00fajo","year":"2019","journal-title":"arXiv:1906.00193"},{"key":"ref46","article-title":"{Euclidean, metric, and Wasserstein} gradient flows: An overview","author":"Santambrogio","year":"2016","journal-title":"arXiv:1609.03890"},{"key":"ref47","article-title":"Over parameterized two-level neural networks can learn near optimal feature representations","author":"Fang","year":"2019","journal-title":"arXiv:1910.11508"},{"doi-asserted-by":"publisher","key":"ref48","DOI":"10.1609\/aaai.v31i1.10913"},{"doi-asserted-by":"publisher","key":"ref49","DOI":"10.1007\/s11263-015-0816-y"},{"volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Ravi","article-title":"Optimization as a model for few-shot learning","key":"ref50"},{"key":"ref51","first-page":"3630","article-title":"Matching networks for one shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Vinyals"},{"doi-asserted-by":"publisher","key":"ref52","DOI":"10.1017\/9781108627771"},{"doi-asserted-by":"publisher","key":"ref53","DOI":"10.1111\/1467-9469.00172"}],"container-title":["IEEE Transactions on Information Theory"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/18\/9829223\/09745067.pdf?arnumber=9745067","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,18]],"date-time":"2024-01-18T00:41:47Z","timestamp":1705538507000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9745067\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8]]},"references-count":53,"journal-issue":{"issue":"8"},"URL":"https:\/\/doi.org\/10.1109\/tit.2022.3163341","relation":{},"ISSN":["0018-9448","1557-9654"],"issn-type":[{"type":"print","value":"0018-9448"},{"type":"electronic","value":"1557-9654"}],"subject":[],"published":{"date-parts":[[2022,8]]}}}