{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T07:33:02Z","timestamp":1775719982339,"version":"3.50.1"},"reference-count":42,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2025,5,22]],"date-time":"2025-05-22T00:00:00Z","timestamp":1747872000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,5,22]],"date-time":"2025-05-22T00:00:00Z","timestamp":1747872000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61903358"],"award-info":[{"award-number":["61903358"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61773367"],"award-info":[{"award-number":["61773367"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61821005"],"award-info":[{"award-number":["61821005"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004739","name":"Youth Innovation Promotion Association of the Chinese Academy of Sciences","doi-asserted-by":"publisher","award":["2022196"],"award-info":[{"award-number":["2022196"]}],"id":[{"id":"10.13039\/501100004739","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004739","name":"Youth Innovation Promotion Association of the Chinese Academy of 
Sciences","doi-asserted-by":"publisher","award":["Y202051"],"award-info":[{"award-number":["Y202051"]}],"id":[{"id":"10.13039\/501100004739","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Mach Learn"],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1007\/s10994-025-06782-5","type":"journal-article","created":{"date-parts":[[2025,5,22]],"date-time":"2025-05-22T13:08:05Z","timestamp":1747919285000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Rope-net: deep convolutional neural network via robust principal component analysis"],"prefix":"10.1007","volume":"114","author":[{"given":"Baichen","family":"Liu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8039-6679","authenticated-orcid":false,"given":"Zhi","family":"Han","sequence":"additional","affiliation":[]},{"given":"Xi\u2019ai","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Yanmei","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yandong","family":"Tang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,22]]},"reference":[{"key":"6782_CR1","unstructured":"Bellec, G., Kappel, D., Maass, W., et\u00a0al. (2017). Deep rewiring: Training very sparse deep networks. In: International Conference on Learning Representations"},{"key":"6782_CR2","doi-asserted-by":"crossref","unstructured":"Cand\u00e8s, E. J., Li, X., Ma, Y., et al. (2011). Robust principal component analysis. Journal of the ACM, 58(3), 11.","DOI":"10.1145\/1970392.1970395"},{"key":"6782_CR3","unstructured":"Chellapilla, K., Puri, S., & Simard, P. (2006). High performance convolutional neural networks for document processing. 
In: Tenth International Workshop on Frontiers in Handwriting Recognition"},{"key":"6782_CR4","doi-asserted-by":"crossref","unstructured":"Cheng, Z., Li, B., Fan, Y., et\u00a0al. (2020). A novel rank selection scheme in tensor ring decomposition based on reinforcement learning for deep neural networks. In: ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3292-3296","DOI":"10.1109\/ICASSP40776.2020.9053292"},{"key":"6782_CR5","unstructured":"Denil, M., Shakibi, B., Dinh, L., et\u00a0al. (2013). Predicting parameters in deep learning. In: Neural Information Processing Systems, pp 2148 \u2013 2156"},{"key":"6782_CR6","unstructured":"Denton, E. L., Zaremba, W., Bruna, J., et\u00a0al. (2014). Exploiting linear structure within convolutional networks for efficient evaluation. In: Neural Information Processing Systems, pp 1269 \u2013 1277"},{"key":"6782_CR7","unstructured":"Dettmers, T., & Zettlemoyer, L. (2019). Sparse networks from scratch: Faster training without losing performance. arXiv:1907.04840"},{"key":"6782_CR8","unstructured":"Frankle, J., & Carbin, M. (2018). The lottery ticket hypothesis: Finding sparse, trainable neural networks. In: International Conference on Learning Representations"},{"key":"6782_CR9","unstructured":"Frankle, J., Dziugaite, G. K., Roy, D. M., et\u00a0al. (2019). The lottery ticket hypothesis at scale. arXiv:1903.01611v1"},{"key":"6782_CR10","unstructured":"Garipov, T., Podoprikhin, D., Novikov, A., et\u00a0al. (2016). Ultimate tensorization: Compressing convolutional and fc layers alike. arXiv preprint arXiv:1611.03214"},{"key":"6782_CR11","doi-asserted-by":"publisher","first-page":"150823","DOI":"10.1109\/ACCESS.2019.2947846","volume":"7","author":"K Guo","year":"2019","unstructured":"Guo, K., Xie, X., Xu, X., et al. (2019). Compressing by learning in a low-rank and sparse decomposition form. 
IEEE Access., 7, 150823\u2013150832.","journal-title":"IEEE Access."},{"key":"6782_CR12","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., et\u00a0al. (2016). Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"6782_CR13","doi-asserted-by":"crossref","unstructured":"Herculano-Houzel, S., Mota, B., Wong, P., et\u00a0al. (2010). Connectivity-driven white matter scaling and folding in primate cerebral cortex. Proceedings of the National Academy of Sciences of the United States of America 107(44):19,008\u201319,013","DOI":"10.1073\/pnas.1012590107"},{"issue":"6","key":"6782_CR14","doi-asserted-by":"publisher","first-page":"498","DOI":"10.1037\/h0070888","volume":"24","author":"H Hotelling","year":"1933","unstructured":"Hotelling, H. (1933). Analysis of a complex of statistical variables into principal components. Journal of Educational Psychology, 24(6), 498\u2013520.","journal-title":"Journal of Educational Psychology"},{"key":"6782_CR15","doi-asserted-by":"crossref","unstructured":"Huang, Z., & Wang, N. (2018). Data-driven sparse structure selection for deep neural networks. In: Proceedings of the European Conference on Computer Vision (ECCV), pp 317\u2013334","DOI":"10.1007\/978-3-030-01270-0_19"},{"key":"6782_CR16","doi-asserted-by":"crossref","unstructured":"Jaderberg, M., Vedaldi, A., & Zisserman, A. (2014). Speeding up convolutional neural networks with low rank expansions. In: British Machine Vision Conference 2014","DOI":"10.5244\/C.28.88"},{"key":"6782_CR17","doi-asserted-by":"crossref","unstructured":"Jolliffe, I. (1986). Principal component analysis","DOI":"10.1007\/978-1-4757-1904-8"},{"key":"6782_CR18","doi-asserted-by":"crossref","unstructured":"Kim, Y. D., Park, E., Yoo, S., et\u00a0al. (2016). 
Compression of deep convolutional neural networks for fast and low power mobile applications","DOI":"10.14257\/astl.2016.140.36"},{"issue":"7","key":"6782_CR19","doi-asserted-by":"publisher","first-page":"1867","DOI":"10.1007\/s10994-021-05987-8","volume":"110","author":"H Kong","year":"2021","unstructured":"Kong, H., Lu, C., & Lin, Z. (2021). Tensor q-rank: New data dependent definition of tensor rank. Machine Learning, 110(7), 1867\u20131900.","journal-title":"Machine Learning"},{"issue":"4","key":"6782_CR20","first-page":"1","volume":"1","author":"A Krizhevsky","year":"2009","unstructured":"Krizhevsky, A., & Hinton, G. (2009). Learning multiple layers of features from tiny images. CsTorontoEdu, 1(4), 1\u201358.","journal-title":"CsTorontoEdu"},{"key":"6782_CR21","unstructured":"Krizhevsky, A., Sutskever, I., & Hinton, G. E. (2012). Imagenet classification with deep convolutional neural networks. Advances in Neural Information Processing Systems 25"},{"key":"6782_CR22","unstructured":"Kusupati, A., Ramanujan, V., Somani, R., et\u00a0al. (2020). Soft threshold weight reparameterization for learnable sparsity. In: ICML 2020: 37th International Conference on Machine Learning, pp 5544\u20135555"},{"key":"6782_CR23","unstructured":"Lebedev, V., Ganin, Y., Rakhuba, M., et\u00a0al. (2015). Speeding-up convolutional neural networks using fine-tuned cp-decomposition. In: ICLR 2015 : International Conference on Learning Representations 2015"},{"issue":"11","key":"6782_CR24","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y Lecun","year":"1998","unstructured":"Lecun, Y., Bottou, L., Bengio, Y., et al. (1998). Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11), 2278\u20132323.","journal-title":"Proceedings of the IEEE"},{"key":"6782_CR25","unstructured":"Lee, C. Y., Xie, S., Gallagher, P., et\u00a0al. (2015). Deeply-supervised nets. 
In: Artificial intelligence and statistics, pp 562 \u2013 570"},{"key":"6782_CR26","doi-asserted-by":"crossref","unstructured":"Li, Y., Lin, S., Zhang, B., et\u00a0al. (2019). Exploiting kernel sparsity and entropy for interpretable cnn compression. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 2800\u20132809","DOI":"10.1109\/CVPR.2019.00291"},{"key":"6782_CR27","doi-asserted-by":"crossref","unstructured":"Li, Y., Gu, S., Mayer, C., et\u00a0al. (2020). Group sparsity: The hinge between filter pruning and decomposition for network compression. In: IEEE Conference on Computer Vision and Pattern Recognition, pp 8018 \u2013 8027","DOI":"10.1109\/CVPR42600.2020.00804"},{"issue":"2","key":"6782_CR28","doi-asserted-by":"publisher","first-page":"685","DOI":"10.1007\/s10994-021-06049-9","volume":"111","author":"N Liao","year":"2021","unstructured":"Liao, N., Wang, S., Xiang, L., et al. (2021). Achieving adversarial robustness via sparsity. Machine Learning, 111(2), 685\u2013711.","journal-title":"Machine Learning"},{"key":"6782_CR29","doi-asserted-by":"crossref","unstructured":"Lu, C., Feng, J., Chen, Y., et\u00a0al. (2016). Tensor robust principal component analysis: Exact recovery of corrupted low-rank tensors via convex optimization. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp 5249\u20135257","DOI":"10.1109\/CVPR.2016.567"},{"issue":"1","key":"6782_CR30","doi-asserted-by":"publisher","first-page":"2383","DOI":"10.1038\/s41467-018-04316-3","volume":"9","author":"DC Mocanu","year":"2018","unstructured":"Mocanu, D. C., Mocanu, E., Stone, P., et al. (2018). Scalable training of artificial neural networks with adaptive sparse connectivity inspired by network science. Nature Communications, 9(1), 2383\u20132383.","journal-title":"Nature Communications"},{"key":"6782_CR31","unstructured":"Mostafa, H., & Wang, X. (2019). 
Parameter efficient training of deep convolutional neural networks by dynamic sparse reparameterization. In: International Conference on Machine Learning, pp 4646\u20134655"},{"issue":"23","key":"6782_CR32","doi-asserted-by":"publisher","first-page":"3311","DOI":"10.1016\/S0042-6989(97)00169-7","volume":"37","author":"BA Olshausen","year":"1997","unstructured":"Olshausen, B. A., & Field, D. J. (1997). Sparse coding with an overcomplete basis set: A strategy employed by v1? Vision Research, 37(23), 3311\u20133325.","journal-title":"Vision Research"},{"issue":"5","key":"6782_CR33","doi-asserted-by":"publisher","first-page":"2295","DOI":"10.1137\/090752286","volume":"33","author":"IV Oseledets","year":"2011","unstructured":"Oseledets, I. V. (2011). Tensor-train decomposition. SIAM Journal on Scientific Computing, 33(5), 2295\u20132317.","journal-title":"SIAM Journal on Scientific Computing"},{"key":"6782_CR34","unstructured":"Simonyan, K., & Zisserman, A. (2015). Very deep convolutional networks for large-scale image recognition. In: ICLR 2015 : International Conference on Learning Representations 2015"},{"key":"6782_CR35","unstructured":"Toh, K. C., & Yun, S. (2009). An accelerated proximal gradient algorithm for nuclear norm regularized linear least squares problems"},{"issue":"4","key":"6782_CR36","doi-asserted-by":"publisher","first-page":"2880","DOI":"10.1109\/TETCI.2024.3386838","volume":"8","author":"X Wang","year":"2024","unstructured":"Wang, X., Chen, X., Ren, W., et al. (2024). Compensation atmospheric scattering model and two-branch network for single image dehazing. IEEE Transactions on Emerging Topics in Computational Intelligence, 8(4), 2880\u20132896. https:\/\/doi.org\/10.1109\/TETCI.2024.3386838","journal-title":"IEEE Transactions on Emerging Topics in Computational Intelligence"},{"key":"6782_CR37","doi-asserted-by":"crossref","unstructured":"Ye, J., Wang, L., Li, G., et\u00a0al. (2018). 
Learning compact recurrent neural networks with block-term tensor decomposition. In: IEEE Conference on Computer Vision and Pattern Recognition, pp 9378 \u2013 9387","DOI":"10.1109\/CVPR.2018.00977"},{"key":"6782_CR38","doi-asserted-by":"crossref","unstructured":"Yerlan\u00a0Idelbayev, M. A. C. (2020). Low-rank compression of neural nets: Learning the rank of each layer. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 8049\u20138059","DOI":"10.1109\/CVPR42600.2020.00807"},{"key":"6782_CR39","doi-asserted-by":"crossref","unstructured":"Yin, M., Sui, Y., Liao, S., et\u00a0al. (2021). Towards efficient tensor decomposition-based dnn model compression with optimization framework. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp 10,674\u201310,683","DOI":"10.1109\/CVPR46437.2021.01053"},{"issue":"3","key":"6782_CR40","doi-asserted-by":"publisher","first-page":"603","DOI":"10.1007\/s10994-019-05846-7","volume":"109","author":"L Yuan","year":"2020","unstructured":"Yuan, L., Li, C., Cao, J., et al. (2020). Rank minimization on tensor ring: An efficient approach for tensor decomposition and completion. Machine Learning, 109(3), 603\u2013622.","journal-title":"Machine Learning"},{"key":"6782_CR41","unstructured":"Zhao, Q., Zhou, G., Xie, S., et\u00a0al. (2016). Tensor ring decomposition. arXiv preprint arXiv:1606.05535"},{"key":"6782_CR42","unstructured":"Zhou, H., Lan, J., Liu, R., et\u00a0al. (2019). Deconstructing lottery tickets: Zeros, signs, and the supermask. 
In: Advances in Neural Information Processing Systems, pp 3592\u20133602"}],"container-title":["Machine Learning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06782-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10994-025-06782-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06782-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T15:20:44Z","timestamp":1757172044000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10994-025-06782-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,22]]},"references-count":42,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2025,7]]}},"alternative-id":["6782"],"URL":"https:\/\/doi.org\/10.1007\/s10994-025-06782-5","relation":{},"ISSN":["0885-6125","1573-0565"],"issn-type":[{"value":"0885-6125","type":"print"},{"value":"1573-0565","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,5,22]]},"assertion":[{"value":"6 April 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 August 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 April 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 May 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"We 
declare that there are no conflicts of interest and we do not have any possible conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable. This manuscript does not involve animal or human testing.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}},{"value":"Not applicable. This manuscript does not involve animal or human testing.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Not applicable. There is no personal data involved in this article.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}}],"article-number":"150"}}