{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,19]],"date-time":"2025-11-19T07:04:49Z","timestamp":1763535889465,"version":"3.37.3"},"reference-count":50,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61876220","61876221","61976164"],"award-info":[{"award-number":["61876220","61876221","61976164"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China through the Project of the Foundation for Innovative Research Groups","doi-asserted-by":"publisher","award":["61621005"],"award-info":[{"award-number":["61621005"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Program for Cheung Kong Scholars and Innovative Research Team in University","award":["IRT_15R53"],"award-info":[{"award-number":["IRT_15R53"]}]},{"name":"Fund for Foreign Scholars in University Research and Teaching Programs","award":["B07048"],"award-info":[{"award-number":["B07048"]}]},{"name":"National Science Basic Research Plan in Shaanxi Province of China","award":["2020JM-194"],"award-info":[{"award-number":["2020JM-194"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2021]]},"DOI":"10.1109\/access.2021.3094282","type":"journal-article","created":{"date-parts":[[2021,7,2]],"date-time":"2021-07-02T19:59:58Z","timestamp":1625255998000},"page":"126159-126171","source":"Crossref","is-referenced-by-count":2,"title":["Efficient 
Asynchronous Semi-Stochastic Block Coordinate Descent Methods for Large-Scale SVD"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1040-352X","authenticated-orcid":false,"given":"Fanhua","family":"Shang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1394-6586","authenticated-orcid":false,"given":"Zhihui","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yuanyuan","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8475-2749","authenticated-orcid":false,"given":"Hongying","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8532-2241","authenticated-orcid":false,"given":"Jing","family":"Xu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","first-page":"2647","article-title":"On variance reduction in stochastic gradient descent and its asynchronous variants","author":"reddi","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref38","first-page":"2379","article-title":"Fast asynchronous parallel stochastic gradient descent: A lock-free approach with convergence guarantee","author":"zhao","year":"2016","journal-title":"Proc 13th AAAI Conf Artif Intell"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-015-0892-3"},{"key":"ref32","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1007\/s10107-012-0614-z","article-title":"Iteration complexity of randomized block-coordinate descent methods for minimizing a composite function","volume":"144","author":"richt\u00e1rik","year":"2012","journal-title":"Math Program"},{"key":"ref31","article-title":"Stochastic variance-reduced heavy ball power iteration","author":"kim","year":"2019","journal-title":"arXiv 1901 08179"},{"key":"ref30","first-page":"1","article-title":"A class of parallel doubly stochastic algorithms for large-scale 
learning","volume":"21","author":"mokhtari","year":"2020","journal-title":"J Mach Learn Res"},{"key":"ref37","article-title":"GPU asynchronous stochastic gradient descent to speed up neural network training","author":"paine","year":"2014","journal-title":"Proc 2nd Int Conf Learn Represent"},{"key":"ref36","first-page":"975","article-title":"NOMAD: Non-locking, stochastic multi-machine algorithm for asynchronous and decentralized matrix completion","author":"yun","year":"2013","journal-title":"Proc Int Conf Very Large Data Bases"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1145\/2645710.2645725"},{"key":"ref34","first-page":"693","article-title":"HOGWILD!: A lock-free approach to parallelizing stochastic gradient descent","author":"niu","year":"2011","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3070539"},{"key":"ref27","article-title":"An asynchronous parallel randomized Kaczmarz algorithm","author":"liu","year":"2014","journal-title":"arXiv 1401 4780"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2020.3033354"},{"key":"ref2","first-page":"849","article-title":"On spectral clustering: Analysis and an algorithm","author":"ng","year":"2002","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1002\/wics.101"},{"key":"ref20","first-page":"285","article-title":"An asynchronous parallel stochastic coordinate descent algorithm","volume":"16","author":"liu","year":"2015","journal-title":"J Mach Learn Res"},{"key":"ref22","first-page":"315","article-title":"Accelerating stochastic gradient descent using predictive variance reduction","author":"johnson","year":"2013","journal-title":"Proc Adv Neural Inf Process 
Syst"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1137\/140961134"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1137\/16M1057000"},{"key":"ref23","doi-asserted-by":"crossref","first-page":"242","DOI":"10.1109\/JSTSP.2015.2505682","article-title":"Mini-batch semi-stochastic gradient descent in the proximal setting","volume":"10","author":"kone\u010dn\u00fd","year":"2016","journal-title":"IEEE J Sel Topics Signal Process"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2016.2525015"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/2814566"},{"key":"ref50","first-page":"2287","article-title":"Accelerated variance reduced stochastic ADMM","author":"liu","year":"2017","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref10","article-title":"Exploiting numerical sparsity for efficient learning: Faster eigenvector computation and regression","author":"gupta","year":"2018","journal-title":"arXiv 1811 10866"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICDMW.2019.00035"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2018.2878765"},{"key":"ref12","first-page":"2064","article-title":"Coordinate-wise power method","author":"lei","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"article-title":"Randomized coordinate descent methods for big data optimization","year":"2014","author":"tak\u00e1c","key":"ref13"},{"key":"ref14","first-page":"806","article-title":"Efficient coordinate-wise leading eigenvector computation","author":"wang","year":"2018","journal-title":"Proc Int Conf Algorithmic Learn Theory"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1137\/18M1202505"},{"key":"ref16","doi-asserted-by":"crossref","DOI":"10.56021\/9781421407944","author":"golub","year":"2013","journal-title":"Matrix Computations"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1137\/1.9781611970739"},{"key":"ref18","first-page":"1396","article-title":"Randomized block Krylov 
methods for stronger and faster approximate singular value decomposition","author":"musco","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/BF00275687"},{"article-title":"The pagerank citation ranking: Bringing order to the web","year":"1999","author":"page","key":"ref4"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.jcss.2003.11.008"},{"key":"ref6","first-page":"144","article-title":"A stochastic PCA and SVD algorithm with an exponential convergence rate","author":"shamir","year":"2015","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/3-540-45071-8_50"},{"key":"ref8","first-page":"2626","article-title":"Faster eigenvector computation via shift-and-invert preconditioning","author":"garber","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref7","first-page":"248","article-title":"Fast stochastic algorithms for SVD and PCA: Convergence properties and convexity","author":"shamir","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref49","first-page":"815","article-title":"ASVRG: Accelerated proximal SVRG","author":"shang","year":"2018","journal-title":"Mach Learn Res"},{"key":"ref9","first-page":"974","article-title":"LazySVD: Even faster SVD decomposition yet without agonizing pain","author":"allen-zhu","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref46","first-page":"1","article-title":"Principal component analysis in the stochastic differential privacy model","author":"shang","year":"2021","journal-title":"Proc Conf Uncertainty Artif Intell"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2019.2958342"},{"key":"ref48","article-title":"Fast stochastic variance reduced gradient method with momentum acceleration for machine learning","author":"shang","year":"2017","journal-title":"arXiv 1703 07948"},{"key":"ref47","article-title":"Behavior 
mimics distribution: Combining individual and group behaviors for federated learning","author":"huang","year":"2021","journal-title":"Proc 13th Int Joint Conf Artif Intell"},{"key":"ref42","first-page":"5975","article-title":"A simple stochastic variance reduced algorithm with fast convergence rates","author":"zhou","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref41","first-page":"46","article-title":"ASAGA: Asynchronous parallel SAGA","author":"leblond","year":"2017","journal-title":"Proc Int Conf Artif Intell Statist"},{"key":"ref44","first-page":"98","article-title":"Doubly accelerated methods for faster CCA and generalized eigendecomposition","author":"allen-zhu","year":"2017","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref43","first-page":"257","article-title":"Convergence of stochastic gradient descent for PCA","volume":"2016","author":"shamir","year":"0","journal-title":"Proc Int Conf Mach Learn"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/9312710\/09471835.pdf?arnumber=9471835","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,2]],"date-time":"2023-01-02T15:32:35Z","timestamp":1672673555000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9471835\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"references-count":50,"URL":"https:\/\/doi.org\/10.1109\/access.2021.3094282","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2021]]}}}