{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,11,19]],"date-time":"2024-11-19T16:59:23Z","timestamp":1732035563769},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2015,4,11]],"date-time":"2015-04-11T00:00:00Z","timestamp":1428710400000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Math. Program."],"published-print":{"date-parts":[[2015,6]]},"DOI":"10.1007\/s10107-015-0897-y","type":"journal-article","created":{"date-parts":[[2015,4,11]],"date-time":"2015-04-11T00:06:05Z","timestamp":1428710765000},"page":"283-313","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":16,"title":["A globally convergent incremental Newton method"],"prefix":"10.1007","volume":"151","author":[{"given":"M.","family":"G\u00fcrb\u00fczbalaban","sequence":"first","affiliation":[]},{"given":"A.","family":"Ozdaglar","sequence":"additional","affiliation":[]},{"given":"P.","family":"Parrilo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2015,4,11]]},"reference":[{"issue":"3","key":"897_CR1","doi-asserted-by":"crossref","first-page":"807","DOI":"10.1137\/S1052623494268522","volume":"6","author":"D Bertsekas","year":"1996","unstructured":"Bertsekas, D.: Incremental least squares methods and the extended Kalman filter. SIAM J. Optim. 6(3), 807\u2013822 (1996)","journal-title":"SIAM J. Optim."},{"issue":"4","key":"897_CR2","doi-asserted-by":"crossref","first-page":"913","DOI":"10.1137\/S1052623495287022","volume":"7","author":"D Bertsekas","year":"1997","unstructured":"Bertsekas, D.: A new class of incremental gradient methods for least squares problems. SIAM J. Optim. 7(4), 913\u2013926 (1997)","journal-title":"SIAM J. Optim."},{"key":"897_CR3","unstructured":"Bertsekas, D.: Nonlinear Programming. Athena Scientific, United States (1999)"},{"key":"897_CR4","first-page":"1","volume":"2010","author":"D Bertsekas","year":"2011","unstructured":"Bertsekas, D.: Incremental gradient, subgradient, and proximal methods for convex optimization: a survey. Optim. Mach. Learn. 2010, 1\u201338 (2011)","journal-title":"Optim. Mach. Learn."},{"key":"897_CR5","unstructured":"Bertsekas, D.: Convex Optimization Algorithms. Athena Scientific, United States (2015)"},{"issue":"1","key":"897_CR6","doi-asserted-by":"crossref","first-page":"29","DOI":"10.1137\/040615961","volume":"18","author":"D Blatt","year":"2007","unstructured":"Blatt, D., Hero, A., Gauchman, H.: A convergent incremental gradient method with a constant step size. SIAM J. Optim. 18(1), 29\u201351 (2007)","journal-title":"SIAM J. Optim."},{"key":"897_CR7","unstructured":"Bordes, A., Bottou, L., Gallinari, P.: SGD-QN: careful quasi-Newton stochastic gradient descent. J. Mach. Learn. Res. 10, 1737\u20131754 (2009)"},{"key":"897_CR8","doi-asserted-by":"crossref","unstructured":"Bottou, L.: Large-scale machine learning with stochastic gradient descent. In: Lechevallier, Y., Saporta, G. (eds.) Proceedings of COMPSTAT\u20192010, pp. 177\u2013186. Physica-Verlag HD, Heidelberg (2010)","DOI":"10.1007\/978-3-7908-2604-3_16"},{"issue":"2","key":"897_CR9","doi-asserted-by":"crossref","first-page":"137","DOI":"10.1002\/asmb.538","volume":"21","author":"L Bottou","year":"2005","unstructured":"Bottou, L., Le Cun, Y.: On-line learning for very large data sets. Appl. Stoch. Models Bus. Ind. 21(2), 137\u2013151 (2005)","journal-title":"Appl. Stoch. Models Bus. Ind."},{"issue":"1","key":"897_CR10","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1561\/2200000016","volume":"3","author":"S Boyd","year":"2011","unstructured":"Boyd, S., Parikh, N., Chu, E., Peleato, B., Eckstein, J.: Distributed optimization and statistical learning via the alternating direction method of multipliers. Found. Trends Mach. Learn. 3(1), 1\u2013122 (2011)","journal-title":"Found. Trends Mach. Learn."},{"key":"897_CR11","unstructured":"Byrd, R.H., Hansen, S.L., Nocedal, J., Singer, Y.: A Stochastic Quasi-Newton Method for Large-Scale Optimization. arXiv preprint arXiv:1401.7020 (2014)"},{"issue":"3","key":"897_CR12","doi-asserted-by":"crossref","first-page":"543","DOI":"10.1023\/A:1017583307974","volume":"108","author":"E C\u0103tina\u015f","year":"2001","unstructured":"C\u0103tina\u015f, E.: Inexact perturbed Newton methods and applications to a class of Krylov solvers. J. Optim. Theory Appl. 108(3), 543\u2013570 (2001)","journal-title":"J. Optim. Theory Appl."},{"issue":"2","key":"897_CR13","doi-asserted-by":"crossref","first-page":"187","DOI":"10.1007\/BF00935703","volume":"18","author":"WC Davidon","year":"1976","unstructured":"Davidon, W.C.: New least-square algorithms. J. Optim. Theory Appl. 18(2), 187\u2013197 (1976)","journal-title":"J. Optim. Theory Appl."},{"key":"897_CR14","unstructured":"Defazio, A., Bach, F., Lacoste-Julien, S.: SAGA: a fast incremental gradient method with support for non-strongly convex composite objectives. arXiv preprint arXiv:1407.0202 (2014)"},{"issue":"2","key":"897_CR15","doi-asserted-by":"crossref","first-page":"400","DOI":"10.1137\/0719025","volume":"19","author":"R Dembo","year":"1982","unstructured":"Dembo, R., Eisenstat, S., Steihaug, T.: Inexact Newton methods. SIAM J. Numer. Anal. 19(2), 400\u2013408 (1982)","journal-title":"SIAM J. Numer. Anal."},{"issue":"126","key":"897_CR16","doi-asserted-by":"crossref","first-page":"549","DOI":"10.1090\/S0025-5718-1974-0343581-1","volume":"28","author":"JE Dennis","year":"1974","unstructured":"Dennis, J.E., Mor\u00e9, J.J.: A characterization of superlinear convergence and its application to quasi-newton methods. Math. Comput. 28(126), 549\u2013560 (1974)","journal-title":"Math. Comput."},{"key":"897_CR17","first-page":"2121","volume":"12","author":"J Duchi","year":"2011","unstructured":"Duchi, J., Hazan, E., Singer, Y.: Adaptive subgradient methods for online learning and stochastic optimization. J. Mach. Learn. Res. 12, 2121\u20132159 (2011)","journal-title":"J. Mach. Learn. Res."},{"key":"897_CR18","unstructured":"Mairal, J.: Optimization with first-order surrogate functions. In: ICML, Volume 28 of JMLR Proceedings, pp. 783\u2013791, Atlanta, United States (2013)"},{"issue":"2","key":"897_CR19","doi-asserted-by":"crossref","first-page":"103","DOI":"10.1080\/10556789408805581","volume":"4","author":"OL Mangasarian","year":"1994","unstructured":"Mangasarian, O.L., Solodov, M.V.: Serial and parallel backpropagation convergence via nonmonotone perturbed minimization. Optim. Methods Softw. 4(2), 103\u2013116 (1994)","journal-title":"Optim. Methods Softw."},{"key":"897_CR20","doi-asserted-by":"crossref","unstructured":"Mokhtari, A., Ribeiro, A.: Res: regularized Stochastic BFGS algorithm. arXiv preprint arXiv:1401.7625 (2014)","DOI":"10.1109\/GlobalSIP.2013.6737088"},{"issue":"2","key":"897_CR21","doi-asserted-by":"crossref","first-page":"107","DOI":"10.1023\/A:1025703629626","volume":"26","author":"H Moriyama","year":"2003","unstructured":"Moriyama, H., Yamashita, N., Fukushima, M.: The incremental Gauss\u2013Newton algorithm with adaptive stepsize rule. Comput. Optim. Appl. 26(2), 107\u2013141 (2003)","journal-title":"Comput. Optim. Appl."},{"key":"897_CR22","doi-asserted-by":"crossref","first-page":"223","DOI":"10.1007\/978-1-4757-6594-6_11","volume-title":"Stochastic Optimization: Algorithms and Applications. Applied Optimization","author":"A Nedi\u0107","year":"2001","unstructured":"Nedi\u0107, A., Bertsekas, D.: Convergence rate of incremental subgradient algorithms. In: Uryasev, S., Pardalos, P.M. (eds.) Stochastic Optimization: Algorithms and Applications. Applied Optimization, vol. 54, pp. 223\u2013264. Springer, US (2001)"},{"key":"897_CR23","doi-asserted-by":"crossref","unstructured":"Nedi\u0107, A., Ozdaglar, A.: On the rate of convergence of distributed subgradient methods for multi-agent optimization. In: Proceedings of IEEE CDC, pp. 4711\u20134716 (2007)","DOI":"10.1109\/CDC.2007.4434693"},{"issue":"1","key":"897_CR24","doi-asserted-by":"crossref","first-page":"48","DOI":"10.1109\/TAC.2008.2009515","volume":"54","author":"A Nedi\u0107","year":"2009","unstructured":"Nedi\u0107, A., Ozdaglar, A.: Distributed subgradient methods for multi-agent optimization. IEEE Trans. Autom. Control 54(1), 48\u201361 (2009)","journal-title":"IEEE Trans. Autom. Control"},{"key":"897_CR25","unstructured":"Polyak, B.T..: Introduction to optimization. Translations series in mathematics and engineering. Optimization Software, Publications Division, New York (1987)"},{"key":"897_CR26","doi-asserted-by":"crossref","unstructured":"Ram, S.S., Nedic, A., Veeravalli, V.V.: Stochastic incremental gradient descent for estimation in sensor networks. In: Signals, Systems and Computers, 2007. ACSSC 2007. Conference Record of the Forty-First Asilomar Conference on, pp. 582\u2013586 (2007)","DOI":"10.1109\/ACSSC.2007.4487280"},{"issue":"3","key":"897_CR27","doi-asserted-by":"crossref","first-page":"400","DOI":"10.1214\/aoms\/1177729586","volume":"22","author":"H Robbins","year":"1951","unstructured":"Robbins, H., Monro, S.: A stochastic approximation method. Ann. Math. Stat. 22(3), 400\u2013407 (1951)","journal-title":"Ann. Math. Stat."},{"key":"897_CR28","unstructured":"Roux, N.L., Schmidt, M., Bach, F.R.: A stochastic gradient method with an exponential convergence rate for finite training sets. In: Pereira, F., Burges, C.J.C., Bottou, L., Weinberger, K.Q. (eds.) Advances in Neural Information Processing Systems 25, pp. 2663\u20132671. Curran Associates Inc., NY, USA (2012)"},{"key":"897_CR29","unstructured":"Schmidt, M., Roux, N.L.: Fast convergence of stochastic gradient descent under a strong growth condition. arXiv preprint arXiv:1308.6370 , 2013"},{"key":"897_CR30","unstructured":"Schraudolph, N., Yu, J., G\u00fcnter, S.: A stochastic quasi-Newton method for online convex optimization. In: Proceedings of the 11th International Conference Artificial Intelligence and Statistics (AISTATS), pp. 433\u2013440 (2007)"},{"issue":"1","key":"897_CR31","first-page":"1000","volume":"32","author":"O Shamir","year":"2014","unstructured":"Shamir, O., Srebro, N., Zhang, T.: Communication efficient distributed optimization using an approximate Newton-type method. ICML 32(1), 1000\u20131008 (2014)","journal-title":"ICML"},{"key":"897_CR32","unstructured":"Sohl-Dickstein, J., Poole, B., Ganguli, S.: Fast large-scale optimization by unifying stochastic gradient and quasi-Newton methods. In: Jebara, T., Xing, E.P. (eds.) ICML, pp. 604\u2013612. JMLR Workshop and Conference Proceedings (2014)"},{"issue":"1","key":"897_CR33","doi-asserted-by":"crossref","first-page":"23","DOI":"10.1023\/A:1018366000512","volume":"11","author":"MV Solodov","year":"1998","unstructured":"Solodov, M.V.: Incremental gradient algorithms with stepsizes bounded away from zero. Comput. Optim. Appl. 11(1), 23\u201335 (1998)","journal-title":"Comput. Optim. Appl."},{"key":"897_CR34","doi-asserted-by":"crossref","unstructured":"Sparks, E.R., Talwalkar, A., Smith, V., Kottalam, J., Xinghao, P., Gonzalez, J., Franklin, M.J., Jordan, M.I, Kraska, T.: MLI: an API for distributed machine learning. In: IEEE 13th International Conference on Data Mining (ICDM), pp. 1187\u20131192 (2013)","DOI":"10.1109\/ICDM.2013.158"},{"issue":"2","key":"897_CR35","doi-asserted-by":"crossref","first-page":"506","DOI":"10.1137\/S1052623495294797","volume":"8","author":"P Tseng","year":"1998","unstructured":"Tseng, P.: An incremental gradient(-projection) method with momentum term and adaptive stepsize rule. SIAM J. Optim. 8(2), 506\u2013531 (1998)","journal-title":"SIAM J. Optim."},{"issue":"3","key":"897_CR36","doi-asserted-by":"crossref","first-page":"832","DOI":"10.1007\/s10957-013-0409-2","volume":"160","author":"P Tseng","year":"2014","unstructured":"Tseng, P., Yun, S.: Incrementally updated gradient methods for constrained and regularized optimization. J. Optim. Theory Appl. 160(3), 832\u2013853 (2014)","journal-title":"J. Optim. Theory Appl."},{"key":"897_CR37","doi-asserted-by":"crossref","unstructured":"Zhang, T.: Solving large scale linear prediction problems using stochastic gradient descent algorithms. In: Proceedings of the Twenty-First International Conference on Machine Learning, ICML, pp. 919\u2013926. ACM, New York (2004)","DOI":"10.1145\/1015330.1015332"}],"container-title":["Mathematical Programming"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s10107-015-0897-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s10107-015-0897-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s10107-015-0897-y","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,8,23]],"date-time":"2019-08-23T07:45:41Z","timestamp":1566546341000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s10107-015-0897-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2015,4,11]]},"references-count":37,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2015,6]]}},"alternative-id":["897"],"URL":"https:\/\/doi.org\/10.1007\/s10107-015-0897-y","relation":{},"ISSN":["0025-5610","1436-4646"],"issn-type":[{"value":"0025-5610","type":"print"},{"value":"1436-4646","type":"electronic"}],"subject":[],"published":{"date-parts":[[2015,4,11]]}}}