{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,28]],"date-time":"2025-03-28T08:49:01Z","timestamp":1743151741031,"version":"3.40.3"},"publisher-location":"Cham","reference-count":32,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030148799"},{"type":"electronic","value":"9783030148805"}],"license":[{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019]]},"DOI":"10.1007\/978-3-030-14880-5_1","type":"book-chapter","created":{"date-parts":[[2019,3,7]],"date-time":"2019-03-07T15:02:45Z","timestamp":1551970965000},"page":"5-20","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Sparsity in Deep Neural Networks - An Empirical Investigation with TensorQuant"],"prefix":"10.1007","author":[{"given":"Dominik Marek","family":"Loroch","sequence":"first","affiliation":[]},{"given":"Franz-Josef","family":"Pfreundt","sequence":"additional","affiliation":[]},{"given":"Norbert","family":"Wehn","sequence":"additional","affiliation":[]},{"given":"Janis","family":"Keuper","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2019,3,8]]},"reference":[{"key":"1_CR1","unstructured":"Han, S., Mao, H., Dally, W.J.: Deep compression: compressing deep neural networks with pruning, trained quantization and Huffman coding (2015)"},{"key":"1_CR2","unstructured":"Iandola, F.N., Han, S., Moskewicz, M.W., Ashraf, K., Dally, W.J., Keutzer, K.: SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and $${<}0.5\\,\\text{MB}$$ model size (2016)"},{"key":"1_CR3","unstructured":"Howard, A.G., et al.: MobileNets: efficient convolutional neural networks for mobile vision applications (2017)"},{"key":"1_CR4","doi-asserted-by":"crossref","unstructured":"Zhang, X., Zhou, X., Lin, M., Sun, J.: ShuffleNet: an extremely efficient convolutional neural network for mobile devices (2017)","DOI":"10.1109\/CVPR.2018.00716"},{"key":"1_CR5","doi-asserted-by":"crossref","unstructured":"Zhu, J., Jiang, J., Chen, X., Tsui, C.Y.: SparseNN: an energy-efficient neural network accelerator exploiting input and output sparsity. CoRR, abs\/1711.01263 (2017)","DOI":"10.23919\/DATE.2018.8342010"},{"key":"1_CR6","doi-asserted-by":"crossref","unstructured":"Han, S., et al.: EIE: efficient inference engine on compressed deep neural network. In: ISCA, pp. 243\u2013254. IEEE Computer Society (2016)","DOI":"10.1145\/3007787.3001163"},{"key":"1_CR7","unstructured":"Aimar, A., et al.: NullHop: a flexible convolutional neural network accelerator based on sparse representations of feature maps (2017)"},{"key":"1_CR8","unstructured":"Andri, R., Cavigelli, L., Rossi, D., Benini, L.: YodaNN: an architecture for ultra-low power binary-weight CNN acceleration (2016)"},{"key":"1_CR9","doi-asserted-by":"crossref","unstructured":"Rybalkin, V., Wehn, N., Yousefi, M.R., Stricker, D.: Hardware architecture of bidirectional long short-term memory neural network for optical character recognition. In: Proceedings of the Conference on Design, Automation and Test in Europe, pp. 1394\u20131399. European Design and Automation Association (2017)","DOI":"10.23919\/DATE.2017.7927210"},{"key":"1_CR10","unstructured":"Chang, A.X.M., Zaidy, A., Gokhale, V., Culurciello, E.: Compiling deep learning models for custom hardware accelerators (2017)"},{"key":"1_CR11","unstructured":"You, Y., Gitman, I., Ginsburg, B.: Large batch training of convolutional networks (2017)"},{"key":"1_CR12","doi-asserted-by":"crossref","unstructured":"Keuper, J., Pfreundt, F.J.: Distributed training of deep neural networks: theoretical and practical limits of parallel scalability (2016)","DOI":"10.1109\/MLHPC.2016.006"},{"key":"1_CR13","unstructured":"Kuehn, M., Keuper, J., Pfreundt, F.J.: Using GPI-2 for distributed memory paralleliziation of the Caffe toolbox to speed up deep neural network training (2017)"},{"key":"1_CR14","doi-asserted-by":"crossref","unstructured":"Renggli, C., Alistarh, D., Hoefler, T.: SparCML: high-performance sparse communication for machine learning (2018)","DOI":"10.1145\/3295500.3356222"},{"key":"1_CR15","doi-asserted-by":"crossref","unstructured":"Aji, A.F., Heafield, K.: Sparse communication for distributed gradient descent (2017)","DOI":"10.18653\/v1\/D17-1045"},{"key":"1_CR16","unstructured":"Wangni, J., Wang, J., Liu, J., Zhang, T.: Gradient sparsification for communication-efficient distributed optimization (2017)"},{"key":"1_CR17","doi-asserted-by":"crossref","unstructured":"Rhu, M., O\u2019Connor, M., Chatterjee, N., Pool, J., Kwon, Y., Keckler, S.W.: Compressing DMA engine: leveraging activation sparsity for training deep neural networks. In: 2018 IEEE International Symposium on High Performance Computer Architecture (HPCA), pp. 78\u201391. IEEE (2018)","DOI":"10.1109\/HPCA.2018.00017"},{"key":"1_CR18","unstructured":"Lin, Y., Han, S., Mao, H., Wang, Y., Dally, W.J.: Deep gradient compression: reducing the communication bandwidth for distributed training (2017)"},{"key":"1_CR19","doi-asserted-by":"crossref","unstructured":"Loroch, D.M., Pfreundt, F.J., Wehn, N., Keuper, J.: TensorQuant: a simulation toolbox for deep neural network quantization. In: MLHPC@SC, pp. 1:1\u20131:8. ACM (2017)","DOI":"10.1145\/3146347.3146348"},{"key":"1_CR20","first-page":"265","volume":"16","author":"M Abadi","year":"2016","unstructured":"Abadi, M., et al.: TensorFlow: a system for large-scale machine learning. OSDI 16, 265\u2013283 (2016)","journal-title":"OSDI"},{"issue":"2","key":"1_CR21","doi-asserted-by":"publisher","first-page":"223","DOI":"10.1137\/16M1080173","volume":"60","author":"L Bottou","year":"2016","unstructured":"Bottou, L., Curtis, F.E., Nocedal, J.: Optimization methods for large-scale machine learning. SIAM Rev. 60(2), 223\u2013311 (2016)","journal-title":"SIAM Rev."},{"key":"1_CR22","unstructured":"Sun, X., Ren, X., Ma, S., Wang, H.: meProp: sparsified back propagation for accelerated deep learning with reduced overfitting. CoRR, abs\/1706.06197 (2017)"},{"key":"1_CR23","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: ImageNet classification with deep convolutional neural networks. In: Advances in Neural Information Processing Systems, pp. 1097\u20131105 (2012)"},{"key":"1_CR24","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition (2015)","DOI":"10.1109\/CVPR.2016.90"},{"key":"1_CR25","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2009, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"1_CR26","unstructured":"Krizhevsky, A.: Learning multiple layers of features from tiny images, May 2012"},{"key":"1_CR27","unstructured":"Krizhevsky, A., Nair, V., Hinton, G.: CIFAR-100 (Canadian institute for advanced research) (2009)"},{"issue":"11","key":"1_CR28","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y LeCun","year":"1998","unstructured":"LeCun, Y., Bottou, L., Bengio, Y., Haffner, P.: Gradient-based learning applied to document recognition. Proc. IEEE 86(11), 2278\u20132324 (1998)","journal-title":"Proc. IEEE"},{"issue":"1","key":"1_CR29","first-page":"1929","volume":"15","author":"N Srivastava","year":"2014","unstructured":"Srivastava, N., Hinton, G.E., Krizhevsky, A., Sutskever, I., Salakhutdinov, R.: Dropout: a simple way to prevent neural networks from overfitting. J. Mach. Learn. Res. 15(1), 1929\u20131958 (2014)","journal-title":"J. Mach. Learn. Res."},{"key":"1_CR30","unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: accelerating deep network training by reducing internal covariate shift. CoRR, abs\/1502.03167 (2015)"},{"key":"1_CR31","unstructured":"Wen, W., Wu, C., Wang, Y., Chen, Y., Li, H.: Learning structured sparsity in deep neural networks (2016)"},{"key":"1_CR32","unstructured":"Liu, X., Pool, J., Han, S., Dally, W.J.: Efficient sparse-Winograd convolutional neural networks. CoRR, abs\/1802.06367 (2018)"}],"container-title":["Communications in Computer and Information Science","ECML PKDD 2018 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-14880-5_1","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,15]],"date-time":"2024-02-15T01:11:27Z","timestamp":1707959487000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-14880-5_1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019]]},"ISBN":["9783030148799","9783030148805"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-14880-5_1","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2019]]},"assertion":[{"value":"8 March 2019","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECML PKDD","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Joint European Conference on Machine Learning and Knowledge Discovery in Databases","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Dublin","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Ireland","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2018","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"10 September 2018","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 September 2018","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ecml2018","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.ecmlpkdd2018.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"535","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"131","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"17","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"24% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}]}}