{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,4]],"date-time":"2026-02-04T18:16:10Z","timestamp":1770228970474,"version":"3.49.0"},"reference-count":31,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2023,8,28]],"date-time":"2023-08-28T00:00:00Z","timestamp":1693180800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,8,28]],"date-time":"2023-08-28T00:00:00Z","timestamp":1693180800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/100020409","name":"Analytical Center for the Government of the Russian Federation","doi-asserted-by":"publisher","award":["000000D730321P5Q000"],"award-info":[{"award-number":["000000D730321P5Q000"]}],"id":[{"id":"10.13039\/100020409","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Comput Manag Sci"],"published-print":{"date-parts":[[2023,12]]},"DOI":"10.1007\/s10287-023-00470-2","type":"journal-article","created":{"date-parts":[[2023,8,28]],"date-time":"2023-08-28T16:04:21Z","timestamp":1693238661000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Gradient-free methods for non-smooth convex stochastic optimization with heavy-tailed noise on convex compact"],"prefix":"10.1007","volume":"20","author":[{"given":"Nikita","family":"Kornilov","sequence":"first","affiliation":[]},{"given":"Alexander","family":"Gasnikov","sequence":"additional","affiliation":[]},{"given":"Pavel","family":"Dvurechensky","sequence":"additional","affiliation":[]},{"given":"Darina","family":"Dvinskikh","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,28]]},"reference":[{"key":"470_CR1","unstructured":"Akhavan A, Chzhen E, Pontil M, Tsybakov AB (2022) A gradient estimator via l1-randomization for online zero-order optimization with two point feedback. arXiv preprint arXiv:2205.13910"},{"key":"470_CR2","doi-asserted-by":"publisher","first-page":"1399","DOI":"10.1134\/S0005117918080039","volume":"79","author":"AS Bayandina","year":"2018","unstructured":"Bayandina AS, Gasnikov AV, Lagunovskaya AA (2018) Gradient-free two-point methods for solving stochastic nonsmooth convex optimization problems with small non-random noises. Autom Remote Control 79:1399\u20131408","journal-title":"Autom Remote Control"},{"key":"470_CR3","doi-asserted-by":"publisher","DOI":"10.1137\/1.9780898718829","volume-title":"Lectures on modern convex optimization: analysis, algorithms, and engineering applications","author":"A Ben-Tal","year":"2001","unstructured":"Ben-Tal A, Nemirovski A (2001) Lectures on modern convex optimization: analysis, algorithms, and engineering applications. SIAM, Philadelphia"},{"key":"470_CR4","doi-asserted-by":"crossref","unstructured":"Beznosikov A, Sadiev A, Gasnikov A (2020) Gradient-free methods with inexact oracle for convex-concave stochastic saddle-point problem. In: Mathematical optimization theory and operations research: 19th international conference, MOTOR 2020, Novosibirsk, Russia, July 6\u201310, 2020, Revised Selected Papers 19. Springer, pp 105\u2013119","DOI":"10.1007\/978-3-030-58657-7_11"},{"key":"470_CR5","doi-asserted-by":"publisher","DOI":"10.1137\/1.9780898718768","volume-title":"Introduction to derivative-free optimization","author":"AR Conn","year":"2009","unstructured":"Conn AR, Scheinberg K, Vicente LN (2009) Introduction to derivative-free optimization. SIAM, Montreal"},{"issue":"1","key":"470_CR6","first-page":"2237","volume":"22","author":"D Davis","year":"2021","unstructured":"Davis D, Drusvyatskiy D, Xiao L, Zhang J (2021) From low probability to high confidence in stochastic convex optimization. J Mach Learn Res 22(1):2237\u20132274","journal-title":"J Mach Learn Res"},{"issue":"5","key":"470_CR7","doi-asserted-by":"publisher","first-page":"2788","DOI":"10.1109\/TIT.2015.2409256","volume":"61","author":"JC Duchi","year":"2015","unstructured":"Duchi JC, Jordan MI, Wainwright MJ, Wibisono A (2015) Optimal rates for zero-order convex optimization: the power of two function evaluations. IEEE Trans Inf Theory 61(5):2788\u20132806","journal-title":"IEEE Trans Inf Theory"},{"key":"470_CR8","unstructured":"Dvinskikh D, Tominin V, Tominin Y, Gasnikov A (2022) Gradient-free optimization for non-smooth minimax problems with maximum value of adversarial noise. arXiv preprint arXiv:2202.06114"},{"key":"470_CR9","doi-asserted-by":"publisher","first-page":"48","DOI":"10.1134\/S0965542518010050","volume":"58","author":"AV Gasnikov","year":"2018","unstructured":"Gasnikov AV, Nesterov YE (2018) Universal method for stochastic composite optimization problems. Comput Math Math Phys 58:48\u201364","journal-title":"Comput Math Math Phys"},{"key":"470_CR10","doi-asserted-by":"publisher","first-page":"2018","DOI":"10.1134\/S0005117916110114","volume":"77","author":"AV Gasnikov","year":"2016","unstructured":"Gasnikov AV, Lagunovskaya AA, Usmanova IN, Fedorenko FA (2016) Gradient-free proximal methods with inexact oracle for convex stochastic nonsmooth optimization problems on the simplex. Autom Remote Control 77:2018\u20132034","journal-title":"Autom Remote Control"},{"key":"470_CR11","doi-asserted-by":"publisher","first-page":"224","DOI":"10.1134\/S0005117917020035","volume":"78","author":"AV Gasnikov","year":"2017","unstructured":"Gasnikov AV, Krymova EA, Lagunovskaya AA, Usmanova IN, Fedorenko FA (2017) Stochastic online optimization. Single-point and multi-point non-linear multi-armed bandits. Convex and strongly-convex case. Autom Remote Control 78:224\u2013234","journal-title":"Autom Remote Control"},{"key":"470_CR12","doi-asserted-by":"crossref","unstructured":"Gasnikov A, Dvinskikh D, Dvurechensky P, Gorbunov E, Beznosikov A, Lobanov A (2022a) Randomized gradient-free methods in convex optimization. arXiv preprint arXiv:2211.13566","DOI":"10.1007\/978-3-030-54621-2_859-1"},{"key":"470_CR13","unstructured":"Gasnikov A, Novitskii A, Novitskii V, Abdukhakimov F, Kamzolov D, Beznosikov A, Tak\u00e1\u010d M, Dvurechensky P, Gu B (2022b) The power of first-order smooth optimization for black-box non-smooth problems. arXiv preprint arXiv:2201.12289"},{"key":"470_CR14","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1134\/S0001434619070022","volume":"106","author":"E Gorbunov","year":"2019","unstructured":"Gorbunov E, Vorontsova EA, Gasnikov AV (2019) On the upper bound for the expectation of the norm of a vector uniformly distributed on the sphere and the phenomenon of concentration of uniform measure on the sphere. Math Notes 106:11\u201319","journal-title":"Math Notes"},{"key":"470_CR15","unstructured":"Gorbunov E, Danilova M, Shibaev I, Dvurechensky P, Gasnikov A (2021) Near-optimal high probability complexity bounds for non-smooth stochastic optimization with heavy-tailed noise. arXiv preprint arXiv:2106.05958"},{"issue":"1","key":"470_CR16","doi-asserted-by":"publisher","first-page":"44","DOI":"10.1287\/10-SSY010","volume":"4","author":"A Juditsky","year":"2014","unstructured":"Juditsky A, Nesterov Y (2014) Deterministic and stochastic primal-dual subgradient algorithms for uniformly convex minimization. Stochastic Syst 4(1):44\u201380","journal-title":"Stochastic Syst"},{"key":"470_CR17","doi-asserted-by":"crossref","unstructured":"Ledoux M (2005) The concentration of measure phenomenon. ed. by Peter Landweber et al. vol. 89. Mathematical Surveys and Monographs. American Mathematical Society, Providence, 181","DOI":"10.1090\/surv\/089"},{"key":"470_CR18","unstructured":"Liu Z, Zhou Z (2023) Stochastic nonsmooth convex optimization with heavy-tailed noises. arXiv preprint arXiv:2303.12277"},{"key":"470_CR19","doi-asserted-by":"crossref","unstructured":"Lobanov A, Alashqar B, Dvinskikh D, Gasnikov A (2022) Gradient-free federated learning methods with $$l_1$$ and $$l_2$$-randomization for non-smooth convex stochastic optimization problems. arXiv preprint arXiv:2211.10783","DOI":"10.1134\/S0965542523090026"},{"key":"470_CR20","doi-asserted-by":"publisher","first-page":"1607","DOI":"10.1134\/S0005117919090042","volume":"80","author":"AV Nazin","year":"2019","unstructured":"Nazin AV, Nemirovsky AS, Tsybakov AB, Juditsky AB (2019) Algorithms of robust stochastic optimization based on mirror descent method. Autom Remote Control 80:1607\u20131627","journal-title":"Autom Remote Control"},{"key":"470_CR21","unstructured":"Nemirovskij AS, Yudin DB (1983) Problem complexity and method efficiency in optimization"},{"key":"470_CR23","doi-asserted-by":"publisher","first-page":"527","DOI":"10.1007\/s10208-015-9296-2","volume":"17","author":"Y Nesterov","year":"2017","unstructured":"Nesterov Y, Spokoiny V (2017) Random gradient-free minimization of convex functions. Found Comput Math 17:527\u2013566","journal-title":"Found Comput Math"},{"key":"470_CR24","unstructured":"Nguyen TD, Ene A, Nguyen HL (2023a) Improved convergence in high probability of clipped gradient methods with heavy tails. arXiv preprint arXiv:2304.01119"},{"key":"470_CR25","unstructured":"Nguyen TD, Nguyen TH, Ene A, Nguyen HL (2023b) High probability convergence of clipped-SGD under heavy-tailed noise. arXiv preprint arXiv:2302.05437"},{"key":"470_CR26","unstructured":"Sadiev A, Danilova M, Gorbunov E, Horv\u00e1th S, Gidel G, Dvurechensky P, Gasnikov A, Richt\u00e1rik P (2023) High-probability bounds for stochastic optimization and variational inequalities: the case of unbounded variance. arXiv preprint arXiv:2302.00999"},{"issue":"1","key":"470_CR27","first-page":"1703","volume":"18","author":"O Shamir","year":"2017","unstructured":"Shamir O (2017) An optimal algorithm for bandit and zero-order convex optimization with two-point feedback. J Mach Learn Res 18(1):1703\u20131713","journal-title":"J Mach Learn Res"},{"key":"470_CR28","doi-asserted-by":"publisher","DOI":"10.1137\/1.9781611976595","volume-title":"Lectures on stochastic programming: modeling and theory","author":"A Shapiro","year":"2021","unstructured":"Shapiro A, Dentcheva D, Ruszczynski A (2021) Lectures on stochastic programming: modeling and theory. SIAM, Philadelphia"},{"key":"470_CR29","volume-title":"Introduction to stochastic search and optimization: estimation, simulation, and control","author":"JC Spall","year":"2005","unstructured":"Spall JC (2005) Introduction to stochastic search and optimization: estimation, simulation, and control. Wiley, Chichester"},{"key":"470_CR30","unstructured":"Vural NM, Yu L, Balasubramanian K, Volgushev S, Erdogdu MA (2022) Mirror descent strikes again: Optimal stochastic convex optimization under infinite noise variance. In: Conference on learning theory. PMLR, pp 65\u2013102"},{"key":"470_CR32","unstructured":"Zhang J, Cutkosky A (2022) Parameter-free regret in high probability with heavy tails. arXiv preprint arXiv:2210.14355"},{"key":"470_CR31","first-page":"15383","volume":"33","author":"J Zhang","year":"2020","unstructured":"Zhang J, Karimireddy SP, Veit A, Kim S, Reddi S, Kumar S, Sra S (2020) Why are adaptive methods good for attention models? Adv Neural Inf Process Syst 33:15383\u201315393","journal-title":"Adv Neural Inf Process Syst"}],"container-title":["Computational Management Science"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10287-023-00470-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10287-023-00470-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10287-023-00470-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T23:43:02Z","timestamp":1729986182000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10287-023-00470-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,28]]},"references-count":31,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2023,12]]}},"alternative-id":["470"],"URL":"https:\/\/doi.org\/10.1007\/s10287-023-00470-2","relation":{},"ISSN":["1619-697X","1619-6988"],"issn-type":[{"value":"1619-697X","type":"print"},{"value":"1619-6988","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,8,28]]},"assertion":[{"value":"25 May 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"31 July 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 August 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}],"article-number":"37"}}