{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T19:37:17Z","timestamp":1730230637929,"version":"3.28.0"},"reference-count":32,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,4]]},"DOI":"10.1109\/icassp49357.2023.10094576","type":"proceedings-article","created":{"date-parts":[[2023,5,5]],"date-time":"2023-05-05T17:28:30Z","timestamp":1683307710000},"page":"1-5","source":"Crossref","is-referenced-by-count":0,"title":["Newton-Based Trainable Learning Rate"],"prefix":"10.1109","author":[{"given":"George","family":"Retsinas","sequence":"first","affiliation":[{"name":"National Technical University of Athens,School of E.C.E.,Athens,Greece,15773"}]},{"given":"Giorgos","family":"Sfikas","sequence":"additional","affiliation":[{"name":"University of West Attica,Dept. of Surveying &amp; Geoinformatics Engineering,Athens,Greece,12243"}]},{"given":"Panagiotis Paraskevas","family":"Filntisis","sequence":"additional","affiliation":[{"name":"National Technical University of Athens,School of E.C.E.,Athens,Greece,15773"}]},{"given":"Petros","family":"Maragos","sequence":"additional","affiliation":[{"name":"National Technical University of Athens,School of E.C.E.,Athens,Greece,15773"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/WACV.2017.58"},{"key":"ref12","first-page":"1306","article-title":"Stochastic Polyak step-size for SGD: An adaptive learning rate for fast convergence","author":"loizou","year":"2021","journal-title":"International Conference on Artificial Intelligence and Statistics"},{"article-title":"Accurate, large minibatch SGD: Training Imagenet in 1 hour","year":"2017","author":"goyal","key":"ref15"},{"key":"ref14","article-title":"SGDR: Stochastic gradient descent with warm restarts","author":"loshchilov","year":"2017","journal-title":"Proceedings of the International Conference on Learning Representations"},{"key":"ref31","article-title":"Understanding short-horizon bias in stochastic meta-optimization","author":"wu","year":"2018","journal-title":"Proceedings of the International Conference on Learning Representations"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref11","article-title":"Lecture 6.5-RMSProp, Coursera: Neural Networks for machine learning","author":"tieleman","year":"2012","journal-title":"University of Toronto Technical Report"},{"key":"ref10","article-title":"Adaptive subgradient methods for online learning and stochastic optimization","volume":"12","author":"duchi","year":"2011","journal-title":"Journal of Machine Learning Research"},{"article-title":"Improving generalization performance by switching from Adam to SGD","year":"2017","author":"keskar","key":"ref32"},{"key":"ref2","first-page":"1225","article-title":"Train faster, generalize better: Stability of stochastic gradient descent","author":"hardt","year":"2016","journal-title":"International Conference on Machine 
Learning"},{"key":"ref1","article-title":"When are nonconvex optimization problems not scary?","author":"sun","year":"2016","journal-title":"Ph D thesis"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1137\/17M1134329"},{"journal-title":"Numerical Optimization","year":"2006","author":"nocedal","key":"ref16"},{"key":"ref19","article-title":"Introduction to optimization","author":"polyak","year":"1987","journal-title":"Optimization Software Publication Division"},{"article-title":"Painless stochastic gradient: Interpolation, line-search, and convergence rates","year":"2019","author":"vaswani","key":"ref18"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1049\/cp:19991170"},{"key":"ref23","article-title":"Online learning rate adaptation with hypergradient descent","author":"baydin","year":"2018","journal-title":"Proceedings of the International Conference on Learning Representations"},{"key":"ref26","article-title":"Step-size adaptation using exponentiated gradient updates","author":"amid","year":"2020","journal-title":"ICML Workshop on Beyond First-Order Methods in ML Systems"},{"key":"ref25","first-page":"4556","article-title":"Understanding and correcting pathologies in the training of learned optimizers","author":"metz","year":"2019","journal-title":"International Conference on Machine Learning"},{"key":"ref20","first-page":"452","article-title":"Complexity guarantees for Polyak steps with momentum","author":"barr\u00e9","year":"2020","journal-title":"Conference on Learning Theory"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511569920.007"},{"article-title":"Stochastic mirror descent: Convergence analysis and adaptive variants via the mirror stochastic polyak stepsize","year":"2021","author":"d\u2019orazio","key":"ref21"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref27","article-title":"Learning multiple layers of features from tiny images","author":"krizhevsky","year":"2009","journal-title":"Tech Rep"},{"key":"ref29","article-title":"Wide residual networks","author":"zagoruyko","year":"2016","journal-title":"Proceedings of the British Machine Vision Conference (BMVC)"},{"key":"ref8","article-title":"A method of solving a convex programming problem with convergence rate O(1\/k2)","volume":"27","author":"nesterov","year":"1983","journal-title":"Sov Math Dokl"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1093\/imanum\/8.1.141"},{"key":"ref9","first-page":"289","article-title":"Yellowfin and the art of momentum tuning","volume":"1","author":"zhang","year":"2019","journal-title":"Proceedings of Machine Learning and Systems"},{"key":"ref4","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"Proceedings of the International Conference on Learning Representations"},{"key":"ref3","first-page":"242","article-title":"A convergence theory for deep learning via over-parameterization","author":"allen-zhu","year":"2019","journal-title":"International Conference on Machine Learning"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/0041-5553(69)90035-4"},{"key":"ref5","first-page":"35","article-title":"Note sur la convergence de m&#x00E9;thodes de directions conjugu&#x00E9;es","volume":"3","author":"polak","year":"1969","journal-title":"ESAIM Math Modelling Numer Anal -Mod&#x00E9;lisation Math&#x00E9;matique et Analyse Num&#x00E9;rique"}],"event":{"name":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal 
Processing (ICASSP)","start":{"date-parts":[[2023,6,4]]},"location":"Rhodes Island, Greece","end":{"date-parts":[[2023,6,10]]}},"container-title":["ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10094559\/10094560\/10094576.pdf?arnumber=10094576","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,13]],"date-time":"2023-11-13T18:57:02Z","timestamp":1699901822000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10094576\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,4]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/icassp49357.2023.10094576","relation":{},"subject":[],"published":{"date-parts":[[2023,6,4]]}}}