{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,28]],"date-time":"2026-01-28T21:38:22Z","timestamp":1769636302080,"version":"3.49.0"},"reference-count":29,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["61673364"],"award-info":[{"award-number":["61673364"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Mach Learn"],"published-print":{"date-parts":[[2019,5]]},"DOI":"10.1007\/s10994-019-05785-3","type":"journal-article","created":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T22:02:47Z","timestamp":1556748167000},"page":"859-878","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["An accelerated variance reducing stochastic method with Douglas-Rachford splitting"],"prefix":"10.1007","volume":"108","author":[{"given":"Jingchang","family":"Liu","sequence":"first","affiliation":[]},{"given":"Linli","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Shuheng","family":"Shen","sequence":"additional","affiliation":[]},{"given":"Qing","family":"Ling","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2019,5,1]]},"reference":[{"key":"5785_CR1","doi-asserted-by":"crossref","unstructured":"Allen-Zhu, Z. (2017). Katyusha: The first direct acceleration of stochastic gradient methods. In Proceedings of the 49th annual ACM SIGACT symposium on theory of computing, ACM (pp. 1200\u20131205).","DOI":"10.1145\/3055399.3055448"},{"key":"5785_CR2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-48311-5","volume-title":"Convex analysis and monotone operator theory in Hilbert spaces","author":"HH Bauschke","year":"2017","unstructured":"Bauschke, H. H., & Combettes, P. L. (2017). Convex analysis and monotone operator theory in Hilbert spaces. Berlin: Springer."},{"key":"5785_CR3","unstructured":"Bottou, L., Curtis, F. E., & Nocedal J. (2016). Optimization methods for large-scale machine learning. \n                    arXiv:1606.4838\n                    \n                  ."},{"key":"5785_CR4","doi-asserted-by":"publisher","first-page":"27:1","DOI":"10.1145\/1961189.1961199","volume":"2","author":"CC Chang","year":"2011","unstructured":"Chang, C. C., & Lin, C. J. (2011). LIBSVM: A library for support vector machines. ACM Transactions on Intelligent Systems and Technology, 2, 27:1\u201327:27.","journal-title":"ACM Transactions on Intelligent Systems and Technology"},{"key":"5785_CR5","unstructured":"Defazio, A. (2016). A simple practical accelerated method for finite sums. In Advances in neural information processing systems (pp. 676\u2013684)."},{"key":"5785_CR6","unstructured":"Defazio, A., Bach, F., & Lacoste-Julien, S. (2014). Saga: A fast incremental gradient method with support for non-strongly convex composite objectives. In Advances in neural information processing systems (pp. 1646\u20131654)."},{"key":"5785_CR7","first-page":"2899","volume":"10","author":"J Duchi","year":"2009","unstructured":"Duchi, J., & Singer, Y. (2009). 
Efficient online and batch learning using forward backward splitting. Journal of Machine Learning Research, 10, 2899\u20132934.","journal-title":"Journal of Machine Learning Research"},{"issue":"1\u20133","key":"5785_CR8","doi-asserted-by":"publisher","first-page":"293","DOI":"10.1007\/BF01581204","volume":"55","author":"J Eckstein","year":"1992","unstructured":"Eckstein, J., & Bertsekas, D. P. (1992). On the douglasrachford splitting method and the proximal point algorithm for maximal monotone operators. Mathematical Programming, 55(1\u20133), 293\u2013318.","journal-title":"Mathematical Programming"},{"key":"5785_CR9","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-84858-7","volume-title":"The elements of statistical learning: Data mining, inference and prediction","author":"T Hastie","year":"2009","unstructured":"Hastie, T., Tibshirani, R., & Friedman, J. (2009). The elements of statistical learning: Data mining, inference and prediction (2nd ed.). Berlin: Springer.","edition":"2"},{"key":"5785_CR10","unstructured":"Johnson, R., & Zhang, T. (2013). Accelerating stochastic gradient descent using predictive variance reduction. In Advances in neural information processing systems (pp. 315\u2013323)."},{"key":"5785_CR11","first-page":"777","volume":"10","author":"J Langford","year":"2009","unstructured":"Langford, J., Li, L., & Zhang, T. (2009). Sparse online learning via truncated gradient. Journal of Machine Learning Research, 10, 777\u2013801.","journal-title":"Journal of Machine Learning Research"},{"issue":"2","key":"5785_CR12","doi-asserted-by":"publisher","first-page":"367","DOI":"10.1137\/S1052623494267127","volume":"7","author":"C Lemar\u00e9chal","year":"1997","unstructured":"Lemar\u00e9chal, C., & Sagastiz\u00e1bal, C. (1997). Practical aspects of the Moreau-Yosida regularization: Theoretical preliminaries. SIAM Journal on Optimization, 7(2), 367\u2013385.","journal-title":"SIAM Journal on Optimization"},{"key":"5785_CR13","unstructured":"Lin, H., Mairal, J., Harchaoui, Z. (2015). A universal catalyst for first-order optimization. In Advances in neural information processing systems (pp. 3384\u20133392)."},{"key":"5785_CR14","unstructured":"Lin, H., Mairal, J., Harchaoui, Z. (2017). Catalyst acceleration for first-order convex optimization: From theory to practice. \n                    arXiv:1712.5654\n                    \n                  ."},{"key":"5785_CR15","unstructured":"Needell, D., Ward, R., & Srebro, N. (2014). Stochastic gradient descent, weighted sampling, and the randomized kaczmarz algorithm. In Advances in neural information processing systems (pp. 1017\u20131025)."},{"key":"5785_CR16","volume-title":"Introductory lectures on convex optimization: A basic course","author":"Y Nesterov","year":"2013","unstructured":"Nesterov, Y. (2013). Introductory lectures on convex optimization: A basic course (Vol. 87). Berlin: Springer."},{"key":"5785_CR17","unstructured":"Owen, A. B. (2013) Monte Carlo theory, methods and examples."},{"issue":"3","key":"5785_CR18","doi-asserted-by":"publisher","first-page":"127","DOI":"10.1561\/2400000003","volume":"1","author":"N Parikh","year":"2014","unstructured":"Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and Trends in Optimization, 1(3), 127\u2013239. 
\n                    https:\/\/doi.org\/10.1561\/2400000003\n                    \n                  .","journal-title":"Foundations and Trends in Optimization"},{"key":"5785_CR19","doi-asserted-by":"publisher","first-page":"400","DOI":"10.1214\/aoms\/1177729586","volume":"22","author":"H Robbins","year":"1951","unstructured":"Robbins, H., & Monro, S. (1951). A stochastic approximation method. The Annals of Mathematical Statistics, 22, 400\u2013407.","journal-title":"The Annals of Mathematical Statistics"},{"key":"5785_CR20","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1016\/B978-0-12-415825-2.00009-7","volume-title":"Simulation","author":"S Ross","year":"2013","unstructured":"Ross, S. (2013). Chapter 9 - variance reduction techniques. In S. Ross (Ed.), Simulation (5th ed., pp. 153\u2013231). Cambridge: Academic Press.","edition":"5"},{"issue":"1\u20132","key":"5785_CR21","doi-asserted-by":"publisher","first-page":"83","DOI":"10.1007\/s10107-016-1030-6","volume":"162","author":"M Schmidt","year":"2017","unstructured":"Schmidt, M., Le Roux, N., & Bach, F. (2017). Minimizing finite sums with the stochastic average gradient. Mathematical Programming, 162(1\u20132), 83\u2013112.","journal-title":"Mathematical Programming"},{"key":"5785_CR22","unstructured":"Shalev-Shwartz, S., & Zhang, T. (2012). Proximal stochastic dual coordinate ascent. \n                    arXiv:1211.2717\n                    \n                  ."},{"key":"5785_CR23","first-page":"567","volume":"14(Feb)","author":"S Shalev-Shwartz","year":"2013","unstructured":"Shalev-Shwartz, S., & Zhang, T. (2013). Stochastic dual coordinate ascent methods for regularized loss minimization. Journal of Machine Learning Research, 14(Feb), 567\u2013599.","journal-title":"Journal of Machine Learning Research"},{"key":"5785_CR24","unstructured":"Shalev-Shwartz, S., & Zhang, T. (2014). Accelerated proximal stochastic dual coordinate ascent for regularized loss minimization. In International conference on machine learning (pp. 64\u201372)."},{"key":"5785_CR25","unstructured":"Shamir, O., & Zhang, T. (2013). Stochastic gradient descent for non-smooth optimization: Convergence results and optimal averaging schemes. In International conference on machine learning (pp. 71\u201379)."},{"key":"5785_CR26","first-page":"3639","volume-title":"Advances in Neural Information Processing Systems","author":"BE Woodworth","year":"2016","unstructured":"Woodworth, B. E., & Srebro, N. (2016). Tight complexity bounds for optimizing composite objectives. In D. D. Lee, M. Sugiyama, U. V. Luxburg, I. Guyon, & R. Garnett (Eds.), Advances in Neural Information Processing Systems (Vol. 29, pp. 3639\u20133647). New York: Curran Associates, Inc.,"},{"issue":"4","key":"5785_CR27","doi-asserted-by":"publisher","first-page":"2057","DOI":"10.1137\/140961791","volume":"24","author":"L Xiao","year":"2014","unstructured":"Xiao, L., & Zhang, T. (2014). A proximal stochastic gradient method with progressive variance reduction. SIAM Journal on Optimization, 24(4), 2057\u20132075.","journal-title":"SIAM Journal on Optimization"},{"key":"5785_CR28","unstructured":"Zhao, P., & Zhang, T. (2014) Accelerating minibatch stochastic gradient descent using stratified sampling. \n                    arXiv:1405.3080\n                    \n                  ."},{"key":"5785_CR29","unstructured":"Zhao, P., & Zhang, T. (2015). Stochastic optimization with importance sampling for regularized loss minimization. 
In Proceedings of the 32nd international conference on machine learning (ICML-15) (pp. 1\u20139)."}],"container-title":["Machine Learning"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-019-05785-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s10994-019-05785-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-019-05785-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:09:34Z","timestamp":1588291774000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s10994-019-05785-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,5]]},"references-count":29,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2019,5]]}},"alternative-id":["5785"],"URL":"https:\/\/doi.org\/10.1007\/s10994-019-05785-3","relation":{},"ISSN":["0885-6125","1573-0565"],"issn-type":[{"value":"0885-6125","type":"print"},{"value":"1573-0565","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,5]]},"assertion":[{"value":"21 April 2018","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 January 2019","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 May 2019","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}]}}
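The record above is a Crossref REST API "works" response: the bibliographic data lives under the "message" key, with list-valued fields ("title", "author", "reference") even when a single value is expected. A minimal sketch of consuming it, assuming only the public Crossref endpoint https://api.crossref.org/works/{DOI} (which returns this same envelope) and Python's standard library:

# Fetch the Crossref work record shown above and read a few fields
# out of its "message" envelope. Illustrative sketch, not part of
# the record; assumes network access to api.crossref.org.
import json
import urllib.request

DOI = "10.1007/s10994-019-05785-3"
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    work = json.load(resp)["message"]  # the object shown above

print(work["title"][0])  # article title (list-valued field)
print(", ".join(f"{a['given']} {a['family']}" for a in work["author"]))
print(work["container-title"][0], work["volume"], work["page"])
print("references:", work["reference-count"])

For bulk or production use, Crossref asks clients to identify themselves (e.g. a mailto address in the User-Agent header), which the sketch omits for brevity.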