{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T10:27:21Z","timestamp":1763202441783,"version":"3.37.3"},"reference-count":51,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2020,11,14]],"date-time":"2020-11-14T00:00:00Z","timestamp":1605312000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,11,14]],"date-time":"2020-11-14T00:00:00Z","timestamp":1605312000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Sign Process Syst"],"published-print":{"date-parts":[[2021,5]]},"DOI":"10.1007\/s11265-020-01606-2","type":"journal-article","created":{"date-parts":[[2020,11,14]],"date-time":"2020-11-14T10:02:33Z","timestamp":1605348153000},"page":"531-544","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":13,"title":["Efficient Design of Pruned Convolutional Neural Networks on FPGA"],"prefix":"10.1007","volume":"93","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8556-4507","authenticated-orcid":false,"given":"M\u00e1rio","family":"V\u00e9stias","sequence":"first","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,11,14]]},"reference":[{"issue":"3","key":"1606_CR1","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., Berg, A. C., & Fei-Fei, L. (2015). Imagenet large scale visual recognition challenge. International Journal of Computer Vision, 115(3), 211\u2013252. 
https:\/\/doi.org\/10.1007\/s11263-015-0816-y.","journal-title":"International Journal of Computer Vision"},{"issue":"11","key":"1606_CR2","doi-asserted-by":"publisher","first-page":"41","DOI":"10.1109\/35.41400","volume":"27","author":"YL Cun","year":"1989","unstructured":"Cun, Y. L., Jackel, L. D., Boser, B., Denker, J. S., Graf, H. P., Guyon, I., Henderson, D., Howard, R. E., & Hubbard, W. (1989). Handwritten digit recognition: applications of neural network chips and automatic learning. IEEE Communications Magazine, 27(11), 41\u201346. https:\/\/doi.org\/10.1109\/35.41400.","journal-title":"IEEE Communications Magazine"},{"key":"1606_CR3","unstructured":"Krizhevsky, A., Sutskever, I., & Hinton, G. E. (2012). Imagenet classification with deep convolutional neural networks. In Proceedings of the 25th International Conference on Neural Information Processing Systems - Volume 1 (pp. 1097\u20131105). USA: NIPS\u201912, Curran Associates Inc."},{"key":"1606_CR4","unstructured":"Simonyan, K., & Zisserman, A. (2015). Very deep convolutional networks for large-scale image recognition. In Proceedings of the 3rd International Conference on Learning Representations."},{"key":"1606_CR5","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., Erhan, D., Vanhoucke, V., & Rabinovich, A. (2015). Going deeper with convolutions. In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 1\u20139).","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"1606_CR6","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 770\u2013778).","DOI":"10.1109\/CVPR.2016.90"},{"key":"1606_CR7","doi-asserted-by":"crossref","unstructured":"V\u00e9stias, M. (2020). Deep learning on edge: Challenges and trends. In Rodrigues, J. M., Cardoso, P. J., Monteiro, J., & Ramos, C. 
M. (Eds.) Smart Systems Design, Applications, and Challenges (pp. 23\u201342): IGI Global.","DOI":"10.4018\/978-1-7998-2112-0.ch002"},{"key":"1606_CR8","doi-asserted-by":"crossref","unstructured":"V\u00e9stias, M. P., Duarte, R. P., deSousa, J. T., & Neto, H. (2018). Lite-cnn: A high-performance architecture to execute cnns in low density fpgas. In Proceedings of the 28th International Conference on Field Programmable Logic and Applications.","DOI":"10.1109\/FPL.2018.00075"},{"key":"1606_CR9","doi-asserted-by":"crossref","unstructured":"Jia, Y., Shelhamer, E., Donahue, J., Karayev, S., Long, J., Girshick, R., Guadarrama, S., & Darrell, T. (2014). Caffe: Convolutional architecture for fast feature embedding. arXiv:1408.5093.","DOI":"10.1145\/2647868.2654889"},{"key":"1606_CR10","doi-asserted-by":"publisher","unstructured":"Gysel, P., Pimentel, J., Motamedi, M., & Ghiasi, S. (2018). Ristretto: A framework for empirical study of resource-efficient inference in convolutional neural networks. IEEE Transactions on Neural Networks and Learning Systems. https:\/\/doi.org\/10.1109\/TNNLS.2018.2808319.","DOI":"10.1109\/TNNLS.2018.2808319"},{"key":"1606_CR11","doi-asserted-by":"crossref","unstructured":"V\u00e9stias, M. (2020). Processing systems for deep learning inference on edge devices. In Mastorakis, G., Mavromoustakis, C. X., Batalla, J. M., & Pallis, E. (Eds.) Convergence of Artificial Intelligence and the Internet of Things (pp. 213\u2013240). Cham: Springer International Publishing.","DOI":"10.1007\/978-3-030-44907-0_9"},{"key":"1606_CR12","unstructured":"Google: Edge TPU. (2019) https:\/\/cloud.google.com\/edge-tpu\/."},{"key":"1606_CR13","unstructured":"Coral: EDGE TPU Performance Benchmarks. (2020) https:\/\/coral.ai\/docs\/edgetpu\/benchmarks."},{"key":"1606_CR14","doi-asserted-by":"crossref","unstructured":"M\u00e1rio, V., Lopes, J. D., V\u00e9stias, M., & deSousa, J. T. (2020). Implementing cnns using a linear array of full mesh cgras. 
In Rinc\u00f3n, F., Barba, J., So, H. K. H., Diniz, P., & Caba, J. (Eds.) Applied Reconfigurable Computing. Architectures, Tools, and Applications (pp. 288\u2013297). Cham: Springer International Publishing.","DOI":"10.1007\/978-3-030-44534-8_22"},{"issue":"3","key":"1606_CR15","doi-asserted-by":"publisher","first-page":"247","DOI":"10.1145\/1816038.1815993","volume":"38","author":"S Chakradhar","year":"2010","unstructured":"Chakradhar, S., Sankaradas, M., Jakkula, V., & Cadambi, S. (June 2010). A dynamically configurable coprocessor for convolutional neural networks. SIGARCH Comput. Archit. News, 38(3), 247\u2013257. https:\/\/doi.org\/10.1145\/1816038.1815993.","journal-title":"SIGARCH Comput. Archit. News"},{"key":"1606_CR16","doi-asserted-by":"crossref","unstructured":"Chen, Y., Luo, T., Liu, S., Zhang, S., He, L., Wang, J., Li, L., Chen, T., Xu, Z., Sun, N., & Temam, O. (2014). Dadiannao: A machine-learning supercomputer. In 2014 47th Annual IEEE\/ACM International Symposium on Microarchitecture (pp. 609\u2013622).","DOI":"10.1109\/MICRO.2014.58"},{"key":"1606_CR17","doi-asserted-by":"crossref","unstructured":"Zhang, C., Li, P., Sun, G., Guan, Y., Xiao, B., & Cong, J. (2015). Optimizing fpga-based accelerator design for deep convolutional neural networks. In Proceedings of the 2015 ACM\/SIGDA International Symposium on Field-Programmable Gate Arrays, FPGA \u201915 (pp. 161\u2013170). New York: ACM.","DOI":"10.1145\/2684746.2689060"},{"issue":"3","key":"1606_CR18","first-page":"18","volume":"8","author":"B Liu","year":"2019","unstructured":"Liu, B., Zou, D., Feng, L., Feng, S., Fu, P., & Li, J. (2019). An fpga-based cnn accelerator integrating depthwise separable convolution. 
Electronics, 8(3), 18.","journal-title":"Electronics"},{"issue":"6","key":"1606_CR19","doi-asserted-by":"publisher","first-page":"17","DOI":"10.3390\/electronics8060641","volume":"8","author":"M Rivera-Acosta","year":"2019","unstructured":"Rivera-Acosta, M., Ortega-Cisneros, S., & Rivera, J. (2019). Automatic tool for fast generation of custom convolutional neural networks accelerators for fpga. Electronics, 8(6), 17.","journal-title":"Electronics"},{"key":"1606_CR20","doi-asserted-by":"crossref","unstructured":"Qiu, J., Wang, J., Yao, S., Guo, K., Li, B., Zhou, E., Yu, J., Tang, T., Xu, N., Song, S., Wang, Y., & Yang, H. (2016). Going deeper with embedded fpga platform for convolutional neural network. In Proceedings of the 2016 ACM\/SIGDA International Symposium on Field-Programmable Gate Arrays, FPGA \u201916 (pp. 26\u201335). New York: ACM.","DOI":"10.1145\/2847263.2847265"},{"key":"1606_CR21","doi-asserted-by":"crossref","unstructured":"Suda, N., Chandra, V., Dasika, G., Mohanty, A., Ma, Y., Vrudhula, S., Seo, J. S., & Cao, Y. (2016). Throughput-optimized opencl-based fpga accelerator for large-scale convolutional neural networks. In Proceedings of the 2016 ACM\/SIGDA International Symposium on Field-Programmable Gate Arrays, FPGA \u201916 (pp. 16\u201325). New York: ACM.","DOI":"10.1145\/2847263.2847276"},{"issue":"20","key":"1606_CR22","doi-asserted-by":"publisher","first-page":"e3850","DOI":"10.1002\/cpe.3850","volume":"29","author":"Y Qiao","year":"2017","unstructured":"Qiao, Y., Shen, J., Xiao, T., Yang, Q., Wen, M., & Zhang, C. (2017). Fpga-accelerated deep convolutional neural networks for high throughput and energy efficiency. Concurrency and Computation: Practice and Experience, 29(20), e3850\u2013n\/a. 
https:\/\/doi.org\/10.1002\/cpe.3850,cpe.3850.","journal-title":"Concurrency and Computation: Practice and Experience"},{"issue":"3","key":"1606_CR23","doi-asserted-by":"publisher","first-page":"17:1","DOI":"10.1145\/3079758","volume":"10","author":"Z Liu","year":"2017","unstructured":"Liu, Z., Dou, Y., Jiang, J., Xu, J., Li, S., Zhou, Y., & Xu, Y. (July 2017). Throughput-optimized fpga accelerator for deep convolutional neural networks. ACM Trans. Reconfigurable Technol. Syst., 10 (3), 17:1\u201317:23. https:\/\/doi.org\/10.1145\/3079758.","journal-title":"ACM Trans. Reconfigurable Technol. Syst."},{"key":"1606_CR24","doi-asserted-by":"crossref","unstructured":"Alwani, M., Chen, H., Ferdman, M., & Milder, P. (2016). Fused-layer cnn accelerators. In 2016 49th Annual IEEE\/ACM International Symposium on Microarchitecture (MICRO) (pp. 1\u201312).","DOI":"10.1109\/MICRO.2016.7783725"},{"issue":"2","key":"1606_CR25","doi-asserted-by":"publisher","first-page":"535","DOI":"10.1145\/3140659.3080221","volume":"45","author":"Y Shen","year":"2017","unstructured":"Shen, Y., Ferdman, M., & Milder, P. (2017). Maximizing cnn accelerator efficiency through resource partitioning. SIGARCH Comput. Archit. News, 45(2), 535\u2013547. https:\/\/doi.org\/10.1145\/3140659.3080221.","journal-title":"SIGARCH Comput. Archit. News"},{"key":"1606_CR26","doi-asserted-by":"crossref","unstructured":"Gon\u00e7alves, A., Peres, T., & V\u00e9stias, M. (2019). Exploring data bitwidth to run convolutional neural networks in low density fpgas. In Hochberger, C., Nelson, B., Koch, A., Woods, R., & Diniz, P. (Eds.) Applied Reconfigurable Computing (pp. 387\u2013401). Cham: Springer International Publishing.","DOI":"10.1007\/978-3-030-17227-5_27"},{"key":"1606_CR27","unstructured":"Gysel, P., Motamedi, M., & Ghiasi, S. (2016). Hardware-oriented approximation of convolutional neural networks. 
In Proceedings of the 4th International Conference on Learning Representations."},{"key":"1606_CR28","doi-asserted-by":"crossref","unstructured":"Wang, J., Lou, Q., Zhang, X., Zhu, C., Lin, Y., & Chen, D. (2018). A design flow of accelerating hybrid extremely low bit-width neural network in embedded fpga. In 28th International Conference on Field-Programmable Logic and Applications.","DOI":"10.1109\/FPL.2018.00035"},{"key":"1606_CR29","doi-asserted-by":"publisher","first-page":"107229","DOI":"10.1109\/ACCESS.2020.3000444","volume":"8","author":"MP V\u00e9stias","year":"2020","unstructured":"V\u00e9stias, M. P., Duarte, R. P., De Sousa, J. T., & Neto, H. C. (2020). A configurable architecture for running hybrid convolutional neural networks in low-density fpgas. IEEE Access, 8, 107229\u2013107243.","journal-title":"IEEE Access"},{"key":"1606_CR30","doi-asserted-by":"publisher","unstructured":"Umuroglu, Y., Fraser, N. J., Gambardella, G., Blott, M., Leong, P., Jahre, M., & Vissers, K. (2017). Finn: A framework for fast, scalable binarized neural network inference. In Proceedings of the 2017 ACM\/SIGDA International Symposium on Field-Programmable Gate Arrays, FPGA \u201917. (pp. 65\u201374). New York: ACM. https:\/\/doi.org\/10.1145\/3020078.3021744","DOI":"10.1145\/3020078.3021744"},{"key":"1606_CR31","unstructured":"Han, S., Mao, H., & Dally, W. J. (2015). Deep compression: Compressing deep neural network with pruning, trained quantization and huffman coding. CoRR, arXiv:1510.00149."},{"issue":"2","key":"1606_CR32","doi-asserted-by":"publisher","first-page":"548","DOI":"10.1145\/3140659.3080215","volume":"45","author":"J Yu","year":"2017","unstructured":"Yu, J., Lukefahr, A., Palframan, D., Dasika, G., Das, R., & Mahlke, S. (June 2017). Scalpel: Customizing dnn pruning to the underlying hardware parallelism. SIGARCH Comput. Archit. News, 45(2), 548\u2013560. https:\/\/doi.org\/10.1145\/3140659.3080215.","journal-title":"SIGARCH Comput. Archit. 
News"},{"key":"1606_CR33","doi-asserted-by":"crossref","unstructured":"Albericio, J., Judd, P., Hetherington, T., Aamodt, T., Jerger, N. E., & Moshovos, A. (2016). Cnvlutin: Ineffectual-neuron-free deep neural network computing. In 2016 ACM\/IEEE 43rd Annual International Symposium on Computer Architecture (ISCA) (pp. 1\u201313).","DOI":"10.1109\/ISCA.2016.11"},{"key":"1606_CR34","doi-asserted-by":"crossref","unstructured":"Han, S., Liu, X., Mao, H., Pu, J., Pedram, A., Horowitz, M. A., & Dally, W. J. (2016). Eie: Efficient inference engine on compressed deep neural network. In 2016 ACM\/IEEE 43rd Annual International Symposium on Computer Architecture (ISCA) (pp. 243\u2013254).","DOI":"10.1109\/ISCA.2016.30"},{"issue":"2","key":"1606_CR35","doi-asserted-by":"publisher","first-page":"27","DOI":"10.1145\/3140659.3080254","volume":"45","author":"A Parashar","year":"2017","unstructured":"Parashar, A., Rhu, M., Mukkara, A., Puglielli, A., Venkatesan, R., Khailany, B., Emer, J., Keckler, S. W., & Dally, W. J. (June 2017). Scnn: An accelerator for compressed-sparse convolutional neural networks. SIGARCH Comput. Archit. News, 45(2), 27\u201340. https:\/\/doi.org\/10.1145\/3140659.3080254.","journal-title":"SIGARCH Comput. Archit. News"},{"key":"1606_CR36","doi-asserted-by":"publisher","unstructured":"Nurvitadhi, E., Venkatesh, G., Sim, J., Marr, D., Huang, R., Ong GeeHock, J., Liew, Y. T., Srivatsan, K., Moss, D., Subhaschandra, S., & Boudoukh, G. (2017). Can fpgas beat gpus in accelerating next-generation deep neural networks?. In Proceedings of the 2017 ACM\/SIGDA International Symposium on Field-Programmable Gate Arrays, FPGA \u201917. https:\/\/doi.org\/10.1145\/3020078.3021740 (pp. 5\u201314). 
New York: ACM.","DOI":"10.1145\/3020078.3021740"},{"issue":"3","key":"1606_CR37","doi-asserted-by":"publisher","first-page":"644","DOI":"10.1109\/TNNLS.2018.2852335","volume":"30","author":"A Aimar","year":"2019","unstructured":"Aimar, A., Mostafa, H., Calabrese, E., Rios-Navarro, A., Tapiador-Morales, R., Lungu, I., Milde, M.B., Corradi, F., Linares-Barranco, A., Liu, S., & Delbruck, T. (2019). Nullhop: A flexible convolutional neural network accelerator based on sparse representations of feature maps. IEEE Transactions on Neural Networks and Learning Systems, 30(3), 644\u2013656. https:\/\/doi.org\/10.1109\/TNNLS.2018.2852335.","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"1606_CR38","doi-asserted-by":"crossref","unstructured":"Zhang, S., Du, Z., Zhang, L., Lan, H., Liu, S., Li, L., Guo, Q., Chen, T., & Chen, Y. (2016). Cambricon-x: An accelerator for sparse neural networks. In 2016 49th Annual IEEE\/ACM International Symposium on Microarchitecture (MICRO) (pp. 1\u201312).","DOI":"10.1109\/MICRO.2016.7783723"},{"key":"1606_CR39","doi-asserted-by":"crossref","unstructured":"Lu, L., Xie, J., Huang, R., Zhang, J., Lin, W., & Liang, Y. (2019). An efficient hardware accelerator for sparse convolutional neural networks on fpgas. In 2019 IEEE 27th Annual International Symposium on Field-Programmable Custom Computing Machines (FCCM) (pp 17\u201325).","DOI":"10.1109\/FCCM.2019.00013"},{"key":"1606_CR40","doi-asserted-by":"publisher","unstructured":"V\u00e9stias, M. P., Duarte, R. P., deSousa, J. T., & Neto, H. C. (2019). Fast convolutional neural networks in low density fpgas using zero-skipping and weight pruning. Electronics (8), 11. https:\/\/doi.org\/10.3390\/electronics8111321.","DOI":"10.3390\/electronics8111321"},{"key":"1606_CR41","doi-asserted-by":"publisher","first-page":"125","DOI":"10.3390\/a13050125","volume":"13","author":"M V\u00e9stias","year":"2020","unstructured":"V\u00e9stias, M., Duarte, R., Sousa, J. T. 
D., & Neto, H. (2020). Moving deep learning to the edge. Algorithms, 13, 125.","journal-title":"Algorithms"},{"key":"1606_CR42","doi-asserted-by":"publisher","unstructured":"Venieris, S. I., & Bouganis, C. (2018). fpgaconvnet: Mapping regular and irregular convolutional neural networks on fpgas. IEEE Transactions on Neural Networks and Learning Systems, 1\u201317. https:\/\/doi.org\/10.1109\/TNNLS.2018.2844093.","DOI":"10.1109\/TNNLS.2018.2844093"},{"issue":"1","key":"1606_CR43","doi-asserted-by":"publisher","first-page":"35","DOI":"10.1109\/TCAD.2017.2705069","volume":"37","author":"K Guo","year":"2018","unstructured":"Guo, K., Sui, L., Qiu, J., Yu, J., Wang, J., Yao, S., Han, S., Wang, Y., & Yang, H. (2018). Angel-eye: A complete design flow for mapping cnn onto embedded fpga. IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 37(1), 35\u201347. https:\/\/doi.org\/10.1109\/TCAD.2017.2705069.","journal-title":"IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems"},{"issue":"11","key":"1606_CR44","doi-asserted-by":"publisher","first-page":"2601","DOI":"10.1109\/TCAD.2018.2857078","volume":"37","author":"L Gong","year":"2018","unstructured":"Gong, L., Wang, C., Li, X., Chen, H., & Zhou, X. (2018). Maloc: A fully pipelined fpga accelerator for convolutional neural networks with all layers mapped on chip. IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 37(11), 2601\u20132612. https:\/\/doi.org\/10.1109\/TCAD.2018.2857078.","journal-title":"IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems"},{"key":"1606_CR45","doi-asserted-by":"publisher","first-page":"103136","DOI":"10.1016\/j.micpro.2020.103136","volume":"77","author":"MP V\u00e9stias","year":"2020","unstructured":"V\u00e9stias, M. P., Duarte, R. P., de Sousa, JT, & Neto, H. C. (2020). A fast and scalable architecture to run convolutional neural networks in low density fpgas. 
Microprocessors and Microsystems, 77, 103136.","journal-title":"Microprocessors and Microsystems"},{"key":"1606_CR46","doi-asserted-by":"crossref","unstructured":"Peres, T., Gon\u00e7alves, A., & V\u00e9stias, M. (2019). Faster convolutional neural networks in low density fpgas using block pruning. In Hochberger, C., Nelson, B., Koch, A., Woods, R., & Diniz, P. (Eds.) Applied Reconfigurable Computing (pp. 402\u2013416). Cham: Springer International Publishing.","DOI":"10.1007\/978-3-030-17227-5_28"},{"key":"1606_CR47","doi-asserted-by":"publisher","first-page":"102991","DOI":"10.1016\/j.micpro.2020.102991","volume":"73","author":"RJR Struharik","year":"2020","unstructured":"Struharik, R. J. R., Vukobratovi\u0107, B. Z., Erdeljan, A. M., & Rakanovi\u0107, D. M. (2020). Conna-hardware accelerator for compressed convolutional neural networks. Microprocessors and Microsystems, 73, 102991.","journal-title":"Microprocessors and Microsystems"},{"key":"1606_CR48","doi-asserted-by":"crossref","unstructured":"V\u00e9stias, M. (2021). Convolutional neural network. In Khosrow-Pour, D. B. A. M. (Ed.) Encyclopedia of Information Science and Technology, Fifth Edition (pp. 12\u201326): IGI Global.","DOI":"10.4018\/978-1-7998-3479-3.ch002"},{"key":"1606_CR49","doi-asserted-by":"crossref","unstructured":"Wang, Y., Xu, J., Han, Y., Li, H., & Li, X. (2016). Deepburning: Automatic generation of fpga-based learning accelerators for the neural network family. In 2016 53nd ACM\/EDAC\/IEEE Design Automation Conference (DAC) (pp. 1\u20136).","DOI":"10.1145\/2897937.2898003"},{"key":"1606_CR50","doi-asserted-by":"crossref","unstructured":"Sharma, H., Park, J., Mahajan, D., Amaro, E., Kim, J. K., Shao, C., Mishra, A., & Esmaeilzadeh, H. (2016). From high-level deep neural models to fpgas. In 2016 49th Annual IEEE\/ACM International Symposium on Microarchitecture (MICRO) (pp. 
1\u201312).","DOI":"10.1109\/MICRO.2016.7783720"},{"issue":"3","key":"1606_CR51","doi-asserted-by":"publisher","first-page":"295","DOI":"10.3390\/electronics8030295","volume":"8","author":"M Zhang","year":"2019","unstructured":"Zhang, M., Li, L., Wang, H., Liu, Y., Qin, H., & Zhao, W. (2019). Optimized compression for implementing convolutional neural networks on fpga. Electronics, 8(3), 295. https:\/\/doi.org\/10.3390\/electronics8030295.","journal-title":"Electronics"}],"container-title":["Journal of Signal Processing Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11265-020-01606-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11265-020-01606-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11265-020-01606-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,5,19]],"date-time":"2021-05-19T05:04:18Z","timestamp":1621400658000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11265-020-01606-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,11,14]]},"references-count":51,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2021,5]]}},"alternative-id":["1606"],"URL":"https:\/\/doi.org\/10.1007\/s11265-020-01606-2","relation":{},"ISSN":["1939-8018","1939-8115"],"issn-type":[{"type":"print","value":"1939-8018"},{"type":"electronic","value":"1939-8115"}],"subject":[],"published":{"date-parts":[[2020,11,14]]},"assertion":[{"value":"21 April 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 April 
2020","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 October 2020","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 November 2020","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Compliance with Ethical Standards"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interests"}}]}}