{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T07:22:40Z","timestamp":1774941760425,"version":"3.50.1"},"reference-count":20,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018,11]]},"DOI":"10.1109\/itw.2018.8613445","type":"proceedings-article","created":{"date-parts":[[2019,1,17]],"date-time":"2019-01-17T19:39:34Z","timestamp":1547753974000},"page":"1-5","source":"Crossref","is-referenced-by-count":26,"title":["Generalization error bounds using Wasserstein distances"],"prefix":"10.1109","author":[{"given":"Adrian Tovar","family":"Lopez","sequence":"first","affiliation":[]},{"given":"Varun","family":"Jog","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Generalization bounds of SGLD for non-convex learning: Two theoretical viewpoints","author":"mou","year":"2017","journal-title":"arXiv preprint 1707 05947"},
{"key":"ref11","first-page":"1232","article-title":"Controlling bias in adaptive data analysis using information theory","author":"russo","year":"2016","journal-title":"Proceedings of the 19th International Conference on Artificial Intelligence and Statistics PMLR"},
{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ISIT.2018.8437571"},
{"key":"ref13","first-page":"25","article-title":"Learners that use little information","author":"bassily","year":"2018","journal-title":"Algorithmic Learning Theory"},
{"key":"ref14","article-title":"Chaining mutual information and tightening generalization bounds","author":"asadi","year":"2018","journal-title":"arXiv preprint arXiv 1806 03803"},
{"key":"ref15","article-title":"Topics in optimal transportation","author":"villani","year":"2003","journal-title":"Journal of the American Mathematical Society"},
{"key":"ref16","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1561\/0100000064","article-title":"Concentration of measure inequalities in information theory, communications, and coding","volume":"10","author":"raginsky","year":"2013","journal-title":"Foundations and Trends® in Communications and Information Theory"},
{"key":"ref17","first-page":"681","article-title":"Bayesian learning via stochastic gradient Langevin dynamics","author":"welling","year":"2011","journal-title":"Proceedings of the 28th International Conference on Machine Learning"},
{"key":"ref18","first-page":"797","article-title":"Escaping from saddle points&#x2014; Online stochastic gradient for tensor decomposition","author":"ge","year":"2015","journal-title":"Conference on Learning Theory"},
{"key":"ref19","article-title":"How to escape saddle points efficiently","author":"jin","year":"2017","journal-title":"arXiv preprint 1703 00887"},
{"key":"ref4","first-page":"499","article-title":"Stability and generalization","volume":"2","author":"bousquet","year":"2002","journal-title":"Journal of Machine Learning Research"},
{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9781107298019"},
{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s10444-004-7634-z"},
{"key":"ref5","first-page":"55","article-title":"Stability of randomized learning algorithms","volume":"6","author":"elisseeff","year":"2005","journal-title":"Journal of Machine Learning Research"},
{"key":"ref8","first-page":"1225","article-title":"Train faster, generalize better: Stability of stochastic gradient descent","volume":"48","author":"hardt","year":"2016","journal-title":"Proc 33rd Int Conf Mach Learn"},
{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ITW.2016.7606789"},
{"key":"ref2","author":"vapnik","year":"1998","journal-title":"Statistical Learning Theory"},
{"key":"ref1","first-page":"2521","article-title":"Information-theoretic analysis of generalization capability of learning algorithms","author":"xu","year":"2017","journal-title":"Advances in neural information processing systems"},
{"key":"ref9","article-title":"Generalization bounds for randomized learning with application to stochastic gradient descent","author":"london","year":"2016","journal-title":"NIPS Workshop on Optimizing the Optimizers"},
{"key":"ref20","author":"cover","year":"2012","journal-title":"Elements of Information Theory"}],
"event":{"name":"2018 IEEE Information Theory Workshop (ITW)","location":"Guangzhou","start":{"date-parts":[[2018,11,25]]},"end":{"date-parts":[[2018,11,29]]}},"container-title":["2018 IEEE Information Theory Workshop (ITW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8598908\/8613300\/08613445.pdf?arnumber=8613445","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,26]],"date-time":"2022-01-26T18:46:39Z","timestamp":1643222799000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8613445\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,11]]},"references-count":20,"URL":"https:\/\/doi.org\/10.1109\/itw.2018.8613445","relation":{},"subject":[],"published":{"date-parts":[[2018,11]]}}}