{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,6]],"date-time":"2025-05-06T04:47:55Z","timestamp":1746506875761,"version":"3.40.3"},"publisher-location":"Cham","reference-count":52,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031197710"},{"type":"electronic","value":"9783031197727"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19772-7_40","type":"book-chapter","created":{"date-parts":[[2022,10,27]],"date-time":"2022-10-27T22:09:58Z","timestamp":1666908598000},"page":"690-707","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Learning Extremely Lightweight and\u00a0Robust Model with\u00a0Differentiable Constraints on\u00a0Sparsity and Condition 
Number"],"prefix":"10.1007","author":[{"given":"Xian","family":"Wei","sequence":"first","affiliation":[]},{"given":"Yangyu","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Yanhui","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Hairong","family":"Lv","sequence":"additional","affiliation":[]},{"given":"Hai","family":"Lan","sequence":"additional","affiliation":[]},{"given":"Mingsong","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Xuan","family":"Tang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,28]]},"reference":[{"key":"40_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"484","DOI":"10.1007\/978-3-030-58592-1_29","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Maksym Andriushchenko","year":"2020","unstructured":"Andriushchenko, Maksym, Croce, Francesco, Flammarion, Nicolas, Hein, Matthias: Square attack: a query-efficient black-box adversarial attack via random search. In: Vedaldi, Andrea, Bischof, Horst, Brox, Thomas, Frahm, Jan-Michael. (eds.) ECCV 2020. LNCS, vol. 12368, pp. 484\u2013501. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58592-1_29"},{"key":"40_CR2","unstructured":"Athalye, A., Carlini, N., Wagner, D.: Obfuscated gradients give a false sense of security: circumventing defenses to adversarial examples. In: International Conference on Learning Representations (ICLR) (2018)"},{"key":"40_CR3","doi-asserted-by":"crossref","unstructured":"Chollet, F.: Xception: Deep learning with depthwise separable convolutions. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1800\u20131807. IEEE (2017)","DOI":"10.1109\/CVPR.2017.195"},{"key":"40_CR4","unstructured":"Croce, F., Hein, M.: Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. 
In: International Conference on Machine Learning (ICML) (2020)"},{"issue":"4","key":"40_CR5","doi-asserted-by":"publisher","first-page":"1253","DOI":"10.1137\/S0895479896305696","volume":"21","author":"L De Lathauwer","year":"2000","unstructured":"De Lathauwer, L., De Moor, B., Vandewalle, J.: A multilinear singular value decomposition. SIAM J. Matrix Anal. Appl. 21(4), 1253\u20131278 (2000)","journal-title":"SIAM J. Matrix Anal. Appl."},{"key":"40_CR6","doi-asserted-by":"crossref","unstructured":"Demmel, J.W.: Applied Numerical Linear Algebra, vol. 56. SIAM, Philadelphia (1997)","DOI":"10.1137\/1.9781611971446"},{"key":"40_CR7","unstructured":"Denton, E.L., Zaremba, W., Bruna, J., LeCun, Y., Fergus, R.: Exploiting linear structure within convolutional networks for efficient evaluation. In: Conference on Advances in Neural Information Processing Systems (NeurIPS) (2014)"},{"key":"40_CR8","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (ICLR) (2020)"},{"key":"40_CR9","unstructured":"Goodfellow, I.J., Shlens, J., Szegedy, C.: Explaining and harnessing adversarial examples. In: International Conference on Learning Representations (ICLR) (2014)"},{"key":"40_CR10","unstructured":"Gui, S., Wang, H., Yang, H., Yu, C., Wang, Z., Liu, J.: Model compression with adversarial robustness: a unified optimization framework. In: Proceedings of the 33rd International Conference on Neural Information Processing Systems (NeurIPS), vol. 32 (2019)"},{"key":"40_CR11","unstructured":"Guo, Y., Zhang, C., Zhang, C., Chen, Y.: Sparse dnns with improved adversarial robustness. In: Proceedings of the Advances in Neural Information Processing Systems (NeurIPS). pp. 
242\u2013251 (2018)"},{"key":"40_CR12","unstructured":"Han, S., Mao, H., Dally, W.J.: Deep compression: Compressing deep neural networks with pruning, trained quantization and Huffman coding. In: International Conference on Learning Representations (ICLR) (2016)"},{"key":"40_CR13","unstructured":"Han, S., Pool, J., Tran, J., Dally, W.: Learning both weights and connections for efficient neural network. In: Proceedings of the Advances in Neural Information Processing Systems (NeurIPS) (2015)"},{"key":"40_CR14","unstructured":"Hassani, A., Walton, S., Shah, N., Abuduweili, A., Li, J., Shi, H.: Escaping the big data paradigm with compact transformers. arXiv preprint arXiv:2104.05704 (2021)"},{"key":"40_CR15","unstructured":"He, Z., Gao, S., Xiao, L., Liu, D., He, H., Barber, D.: Wider and deeper, cheaper and faster: Tensorized LSTMs for sequence learning. In: Conference: Advances In Neural Information Processing Systems (NeurIPS), vol. 30 (2017)"},{"key":"40_CR16","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. Statistics 9 1050 (2015)"},{"key":"40_CR17","unstructured":"Horn, R.A., Johnson, C.R.: Topics in Matrix Analysis. Cambridge University Press, Cambridge (1994)"},{"key":"40_CR18","doi-asserted-by":"crossref","unstructured":"Khrulkov, V., Hrinchuk, O., Mirvakhabova, L., Oseledets, I.: Tensorized embedding layers for efficient model compression. In: 8th International Conference on Learning Representations (ICLR) (2020)","DOI":"10.18653\/v1\/2020.findings-emnlp.436"},{"key":"40_CR19","unstructured":"Krizhevsky, A., et al.: Learning multiple layers of features from tiny images. Master Thesis (2009)"},{"key":"40_CR20","unstructured":"Kurakin, A., Goodfellow, I., Bengio, S.: Adversarial machine learning at scale. 
In: International Conference on Learning Representations (ICLR) (2016)"},{"issue":"11","key":"40_CR21","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y LeCun","year":"1998","unstructured":"LeCun, Y., Bottou, L., Bengio, Y., Haffner, P.: Gradient-based learning applied to document recognition. Proc. IEEE 86(11), 2278\u20132324 (1998)","journal-title":"Proc. IEEE"},{"key":"40_CR22","doi-asserted-by":"crossref","unstructured":"Lee, E., Lee, C.Y.: NeuralScale: efficient scaling of neurons for resource-constrained deep neural networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1478\u20131487 (2020)","DOI":"10.1109\/CVPR42600.2020.00155"},{"key":"40_CR23","unstructured":"Lin, J., Gan, C., Han, S.: Defensive quantization: when efficiency meets robustness. In: International Conference on Learning Representations (ICLR) (2019)"},{"key":"40_CR24","unstructured":"Lin, J., Rao, Y., Lu, J., Zhou, J.: Runtime neural pruning. In: Proceedings of the 31st International Conference on Neural Information Processing Systems (NeurIPS), pp. 2178\u20132188 (2017)"},{"key":"40_CR25","unstructured":"Madaan, D., Shin, J., Hwang, S.J.: Adversarial neural pruning with latent vulnerability suppression. In: International Conference on Machine Learning (ICML), pp. 6575\u20136585. PMLR (2020)"},{"key":"40_CR26","unstructured":"Madry, A., Makelov, A., Schmidt, L., Tsipras, D., Vladu, A.: Towards deep learning models resistant to adversarial attacks. In: 6th International Conference on Learning Representations (ICLR). Vancouver, Canada (2018)"},{"key":"40_CR27","doi-asserted-by":"crossref","unstructured":"Moosavi-Dezfooli, S.M., Fawzi, A., Frossard, P.: DeepFool: a simple and accurate method to fool deep neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 
2574\u20132582 (2016)","DOI":"10.1109\/CVPR.2016.282"},{"key":"40_CR28","unstructured":"Netzer, Y., Wang, T., Coates, A., Bissacco, A., Wu, B., Ng, A.Y.: Reading digits in natural images with unsupervised feature learning. In: Advances in Neural Information Processing Systems (NeurIPS) (2011)"},{"key":"40_CR29","doi-asserted-by":"crossref","unstructured":"Papernot, N., McDaniel, P., Jha, S., Fredrikson, M., Celik, Z.B., Swami, A.: The limitations of deep learning in adversarial settings. In: 2016 IEEE European Symposium on Security and Privacy (EuroS &P), pp. 372\u2013387. IEEE (2016)","DOI":"10.1109\/EuroSP.2016.36"},{"key":"40_CR30","doi-asserted-by":"crossref","unstructured":"Papernot, N., McDaniel, P., Wu, X., Jha, S., Swami, A.: Distillation as a defense to adversarial perturbations against deep neural networks. In: 2016 IEEE Symposium on Security and Privacy (SP), pp. 582\u2013597. IEEE (2016)","DOI":"10.1109\/SP.2016.41"},{"issue":"9","key":"40_CR31","doi-asserted-by":"publisher","first-page":"2794","DOI":"10.1016\/j.patcog.2014.03.013","volume":"47","author":"X Peng","year":"2014","unstructured":"Peng, X., Zhang, L., Yi, Z., Tan, K.K.: Learning locality-constrained collaborative representation for robust face recognition. Pattern Recogn. 47(9), 2794\u20132806 (2014)","journal-title":"Pattern Recogn."},{"key":"40_CR32","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"522","DOI":"10.1007\/978-3-030-58526-6_31","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Anh-Huy Phan","year":"2020","unstructured":"Phan, Anh-Huy., Sobolev, Konstantin, Sozykin, Konstantin, Ermilov, Dmitry, Gusak, Julia, Tichavsk\u00fd, Petr, Glukhov, Valeriy, Oseledets, Ivan, Cichocki, Andrzej: Stable low-rank tensor decomposition for compression of convolutional neural network. In: Vedaldi, Andrea, Bischof, Horst, Brox, Thomas, Frahm, Jan-Michael. (eds.) ECCV 2020. LNCS, vol. 12374, pp. 522\u2013539. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58526-6_31"},{"key":"40_CR33","unstructured":"Polino, A., Pascanu, R., Alistarh, D.: Model compression via distillation and quantization. In: International Conference on Learning Representations (ICLR) (2018)"},{"key":"40_CR34","unstructured":"Rolnick, D., Tegmark, M.: The power of deeper networks for expressing natural functions. In: International Conference on Learning Representations (ICLR) (2018)"},{"issue":"3","key":"40_CR35","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: ImageNet large scale visual recognition challenge. Int. J. Comput. Vis. 115(3), 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vis."},{"key":"40_CR36","doi-asserted-by":"crossref","unstructured":"Sainath, T.N., Kingsbury, B., Sindhwani, V., Arisoy, E., Ramabhadran, B.: Low-rank matrix factorization for deep neural network training with high-dimensional output targets. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6655\u20136659. IEEE (2013)","DOI":"10.1109\/ICASSP.2013.6638949"},{"key":"40_CR37","unstructured":"Sehwag, V., Wang, S., Mittal, P., Jana, S.: HYDRA: pruning adversarially robust neural networks. In: Advances in Neural Information Processing Systems (NeurIPS), vol. 33, pp. 19655\u201319666 (2020)"},{"key":"40_CR38","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. 
In: International Conference on Learning Representations (ICLR) (2015)"},{"key":"40_CR39","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"177","DOI":"10.1007\/978-3-030-13453-2_14","volume-title":"ECML PKDD 2018 Workshops","author":"Abhishek Sinha","year":"2019","unstructured":"Sinha, Abhishek, Singh, Mayank, Krishnamurthy, Balaji: Neural networks in an adversarial setting and ill-conditioned weight space. In: Alzate, C., et al. (eds.) ECML PKDD 2018. LNCS (LNAI), vol. 11329, pp. 177\u2013190. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-13453-2_14"},{"key":"40_CR40","unstructured":"Tan, M., Le, Q.: EfficientNet: rethinking model scaling for convolutional neural networks. In: International Conference on Machine Learning (ICML), pp. 6105\u20136114 (2019)"},{"key":"40_CR41","doi-asserted-by":"crossref","unstructured":"Thakker, U., et al.: Pushing the limits of RNN compression. In: 2019 Fifth Workshop on Energy Efficient Machine Learning and Cognitive Computing-NeurIPS Edition (EMC2-NeurIPS), pp. 18\u201321. IEEE (2019)","DOI":"10.1109\/EMC2-NIPS53020.2019.00012"},{"key":"40_CR42","unstructured":"Tolstikhin, I.O., et al.: MLP-mixer: an all-MLP architecture for vision. In: Advances in Neural Information Processing Systems (NeurIPS), vol. 34, pp. 24261\u201324272 (2021)"},{"key":"40_CR43","unstructured":"Tram\u00e8r, F., Kurakin, A., Papernot, N., Goodfellow, I., Boneh, D., McDaniel, P.: Ensemble adversarial training: attacks and defenses. In: International Conference on Learning Representations (ICLR) (2018)"},{"issue":"1\u20132","key":"40_CR44","doi-asserted-by":"publisher","first-page":"85","DOI":"10.1016\/S0377-0427(00)00393-9","volume":"123","author":"CF Van Loan","year":"2000","unstructured":"Van Loan, C.F.: The ubiquitous Kronecker product. J. Comput. Appl. Math. 123(1\u20132), 85\u2013100 (2000)","journal-title":"J. Comput. Appl. 
Math."},{"key":"40_CR45","unstructured":"Wen, W., Wu, C., Wang, Y., Chen, Y., Li, H.: Learning structured sparsity in deep neural networks. In: Proceedings of the 30th International Conference on Neural Information Processing Systems, pp. 2074\u20132082 (2016)"},{"key":"40_CR46","doi-asserted-by":"crossref","unstructured":"Wu, J., Leng, C., Wang, Y., Hu, Q., Cheng, J.: Quantized convolutional neural networks for mobile devices. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4820\u20134828 (2016)","DOI":"10.1109\/CVPR.2016.521"},{"key":"40_CR47","unstructured":"Xu, K., et al.: Structured adversarial attack: Towards general implementation and better interpretability. In: International Conference on Learning Representations (ICLR) (2019)"},{"key":"40_CR48","doi-asserted-by":"crossref","unstructured":"Ye, S., et al.: Adversarial robustness vs. model compression, or both? In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp.\u00a0111\u2013120 (2019)","DOI":"10.1109\/ICCV.2019.00020"},{"key":"40_CR49","doi-asserted-by":"crossref","unstructured":"Yin, M., Sui, Y., Liao, S., Yuan, B.: Towards efficient tensor decomposition-based DNN model compression with optimization framework. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10674\u201310683 (2021)","DOI":"10.1109\/CVPR46437.2021.01053"},{"key":"40_CR50","doi-asserted-by":"crossref","unstructured":"Yu, X., Liu, T., Wang, X., Tao, D.: On compressing deep models by low rank and sparse decomposition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7370\u20137379 (2017)","DOI":"10.1109\/CVPR.2017.15"},{"key":"40_CR51","unstructured":"Zhang, H., Yu, Y., Jiao, J., Xing, E., El Ghaoui, L., Jordan, M.I.: Theoretically principled trade-off between robustness and accuracy. In: International Conference on Machine Learning (ICML), pp. 
7472\u20137482 (2019)"},{"key":"40_CR52","unstructured":"Zhao, Y., Shumailov, I., Mullins, R., Anderson, R.: To compress or not to compress: Understanding the interactions between adversarial attacks and neural network compression. In: Proceedings of Machine Learning and Systems (MLSys), pp. 230\u2013240 (2019)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19772-7_40","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,12]],"date-time":"2024-03-12T16:58:51Z","timestamp":1710262731000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19772-7_40"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031197710","9783031197727"],"references-count":52,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19772-7_40","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"28 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}